"""
Improved Stage 2 Training Script

基于train_stage2_unified.py的改进版本，支持：
1. ImprovedROIRefinerModel（动态注意力融合 + 跨ROI自注意力）
2. 位置信息的数据加载
3. 注意力权重的可视化
4. 渐进式训练策略（可选）
"""
import torch
import torch.nn as nn
import torch.optim as optim
from torch.cuda.amp import autocast, GradScaler
from torch.utils.data import DataLoader
from torchvision import transforms
from tqdm import tqdm
import numpy as np
import os
import yaml
import logging
from pathlib import Path
from typing import Optional, Dict, Tuple
import time
import warnings

# Suppress FutureWarning from torch.cuda.amp (PyTorch version compatibility)
warnings.filterwarnings('ignore', category=FutureWarning, module='torch.cuda.amp')

from src.dataset import ROIDataset, create_train_val_split
from src.models.refiner_improved import ImprovedROIRefinerModel
from src.models.refiner_simple import SimpleROIRefinerModel  # Fallback option
from src.config import DEVICE, STAGE2_CONFIG, IMPROVED_MODEL_CONFIG, DATASET_DIR, WEIGHTS_DIR
from src.training.losses import OHEMFocalLoss, BalancedL1Loss
from src.training.ema import ModelEMA, SWA

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='[%(asctime)s] %(levelname)s: %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S'
)
logger = logging.getLogger(__name__)


class WarmupCosineScheduler:
    """Learning rate scheduler with linear warmup followed by cosine annealing.

    For the first ``warmup_epochs`` epochs the LR ramps linearly from
    ``base_lr / warmup_epochs`` up to ``base_lr``; afterwards it follows a
    half-cosine decay down to ``min_lr`` at ``total_epochs``.
    """

    def __init__(self, optimizer, warmup_epochs: int, total_epochs: int, min_lr: float = 1e-6):
        """
        Args:
            optimizer: Optimizer whose param groups are updated in-place by step().
            warmup_epochs: Number of linear-warmup epochs.
            total_epochs: Total number of training epochs.
            min_lr: Floor learning rate reached at the end of the cosine phase.
        """
        self.optimizer = optimizer
        self.warmup_epochs = warmup_epochs
        self.total_epochs = total_epochs
        self.min_lr = min_lr
        # Base LR is read from the first param group; all groups are assumed
        # to share the same schedule.
        self.base_lr = optimizer.param_groups[0]['lr']

    def step(self, epoch: int) -> float:
        """Update the learning rate for ``epoch`` (0-indexed) and return it."""
        if epoch < self.warmup_epochs:
            # Linear warmup: reaches base_lr at the last warmup epoch.
            lr = self.base_lr * (epoch + 1) / self.warmup_epochs
        else:
            # FIX: guard against division by zero when total_epochs equals
            # warmup_epochs (the original crashed on the first cosine step).
            span = max(self.total_epochs - self.warmup_epochs, 1)
            progress = (epoch - self.warmup_epochs) / span
            lr = self.min_lr + (self.base_lr - self.min_lr) * 0.5 * (1 + np.cos(np.pi * progress))

        for param_group in self.optimizer.param_groups:
            param_group['lr'] = lr
        return lr


class EarlyStopping:
    """Stops training when the validation loss stops improving.

    A value counts as an improvement only when it undercuts the best loss
    seen so far by more than ``min_delta``. After ``patience`` consecutive
    non-improving calls, ``early_stop`` is latched and ``__call__`` returns
    True.
    """

    def __init__(self, patience: int = 10, min_delta: float = 0.001):
        self.patience = patience
        self.min_delta = min_delta
        self.counter = 0         # consecutive non-improving epochs
        self.best_loss = None    # lowest validation loss observed so far
        self.early_stop = False  # latched once patience is exhausted

    def __call__(self, val_loss: float) -> bool:
        """Record ``val_loss``; return True when training should stop."""
        # First observation: just establish the baseline.
        if self.best_loss is None:
            self.best_loss = val_loss
            return False

        improved = val_loss <= self.best_loss - self.min_delta
        if improved:
            self.best_loss = val_loss
            self.counter = 0
            return False

        self.counter += 1
        if self.counter >= self.patience:
            self.early_stop = True
            return True
        return False


class ImprovedStage2Trainer:
    """Improved Stage 2 training pipeline with attention mechanisms.

    Orchestrates dataset/dataloader setup, model construction (OverLock
    backbone with a ResNet50 fallback), loss/optimizer/scheduler setup,
    mixed-precision training with optional EMA/SWA, validation, early
    stopping, and checkpointing.
    """

    def __init__(self, config: Dict, model_config: Dict, use_validation: bool = True):
        """
        Initialize trainer.

        Args:
            config: Training configuration dictionary (STAGE2_CONFIG)
            model_config: Model architecture configuration (IMPROVED_MODEL_CONFIG)
            use_validation: Enable validation during training
        """
        self.config = config
        self.model_config = model_config
        self.use_validation = use_validation

        # Bbox regression loss weight (multiplies reg_loss in the total loss).
        self.bbox_loss_weight = config.get('bbox_loss_weight', 2.0)

        logger.info("=" * 80)
        logger.info("Improved Stage 2 Training Pipeline")
        logger.info("=" * 80)
        logger.info(f"Dynamic Fusion: {model_config.get('use_dynamic_fusion', True)}")
        logger.info(f"Cross-ROI Attention: {model_config.get('use_cross_roi_attention', True)}")
        logger.info(f"Validation: {'Enabled' if use_validation else 'Disabled'}")
        logger.info(f"Bbox loss weight: {self.bbox_loss_weight}")

        # Load class information (number of classes, 'nc') from the dataset's
        # YOLO-style data.yaml.
        with open(os.path.join(DATASET_DIR, 'data.yaml'), 'r') as f:
            self.num_classes = yaml.safe_load(f)['nc']

        # Initialize components (order matters: model setup may fall back to
        # ResNet50 and training setup reads the resulting model parameters).
        self._setup_data()
        self._setup_model()
        self._setup_training()

    def _check_cuda_health(self, stage_name: str) -> bool:
        """Check CUDA health and report any issues.

        Runs a trivial tensor op on the GPU and synchronizes; returns False
        only if that op raises (e.g. after a device-side assert corrupted
        the CUDA context). Returns True when CUDA is healthy or unavailable.
        """
        try:
            if torch.cuda.is_available():
                # Try a simple operation
                test = torch.zeros(1, device='cuda')
                _ = test + 1
                del test
                torch.cuda.synchronize()
                logger.debug(f"✓ CUDA healthy at: {stage_name}")
                return True
        except Exception as e:
            logger.error(f"✗ CUDA unhealthy at {stage_name}: {e}")
            return False
        return True

    def _setup_data(self) -> None:
        """Setup datasets and dataloaders.

        Builds train (and optionally val) ROIDataset instances with
        ImageNet-normalized transforms, and DataLoaders with drop_last=True
        so every batch has a uniform size.
        """
        logger.info("Setting up datasets...")
        self._check_cuda_health("before data setup")

        # Define transforms (train: heavy augmentation; val: resize+normalize only).
        train_transform = transforms.Compose([
            transforms.Resize((self.config['roi_size'], self.config['roi_size'])),
            transforms.RandomHorizontalFlip(p=0.5),
            transforms.RandomVerticalFlip(p=0.5),
            transforms.RandomRotation(15),
            transforms.ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3, hue=0.1),
            transforms.RandomAffine(degrees=0, translate=(0.1, 0.1), scale=(0.9, 1.1)),
            transforms.ToTensor(),
            # ImageNet mean/std normalization.
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ])

        val_transform = transforms.Compose([
            transforms.Resize((self.config['roi_size'], self.config['roi_size'])),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ])

        # Setup train/val split if validation enabled (fixed seed for a
        # reproducible 80/20 split).
        if self.use_validation:
            proposals_file = self.config['proposals_json']
            train_file, val_file = create_train_val_split(proposals_file, val_ratio=0.2, seed=42)

            self.train_dataset = ROIDataset(train_file, transform=train_transform)
            self.val_dataset = ROIDataset(val_file, transform=val_transform)

            logger.info(f"Train samples: {len(self.train_dataset)}")
            logger.info(f"Val samples: {len(self.val_dataset)}")
        else:
            self.train_dataset = ROIDataset(self.config['proposals_json'], transform=train_transform)
            logger.info(f"Train samples: {len(self.train_dataset)}")

        # Create dataloaders
        # Note: Using num_workers=0 to avoid multiprocessing issues with CUDA
        # If training is slow, can increase to 2-4 after confirming CUDA stability
        num_workers = 0  # Safe default, prevents CUDA multiprocessing issues

        self.train_loader = DataLoader(
            self.train_dataset,
            batch_size=self.config['batch_size'],
            shuffle=True,
            num_workers=num_workers,
            pin_memory=True,
            drop_last=True  # Keep batch size consistent across steps (important!)
        )

        if self.use_validation:
            self.val_loader = DataLoader(
                self.val_dataset,
                batch_size=self.config['batch_size'],
                shuffle=False,
                num_workers=num_workers,
                pin_memory=True,
                drop_last=True  # Keep batch size consistent across steps
            )

        logger.info(f"DataLoader num_workers: {num_workers} (0=safe, 2-4=faster but may cause CUDA issues)")
        self._check_cuda_health("after data loader setup")

    def _setup_model(self) -> None:
        """Setup model and move to device.

        Uses ImprovedROIRefinerModel (OverLock backbone) by default; falls
        back to SimpleROIRefinerModel (ResNet50) when configured, or when
        OverLock initialization fails with a recoverable error. A CUDA
        device-side assert is unrecoverable in-process and aborts instead.
        """
        logger.info("Setting up improved model...")

        # Select backbone based on configuration
        backbone = self.model_config.get('backbone', 'overlock')

        if backbone == 'resnet50':
            logger.warning("⚠️  Using ResNet50 backbone (fallback mode)")
            logger.warning("     Set backbone='overlock' in IMPROVED_MODEL_CONFIG to use OverLock")
            self.model = SimpleROIRefinerModel(
                device=DEVICE,
                unfreeze_layers=self.config.get('unfreeze_layers', 2),
                config=self.model_config
            )
        else:
            # Default: OverLock
            try:
                logger.info("Using OverLock backbone...")
                self.model = ImprovedROIRefinerModel(
                    device=DEVICE,
                    unfreeze_layers=self.config.get('unfreeze_layers', 2),
                    config=self.model_config
                )
            except Exception as e:
                logger.error(f"✗ Failed to initialize OverLock: {e}")

                # Check if this is a CUDA device-side assert error
                error_str = str(e).lower()
                if 'device-side assert' in error_str or 'cuda error' in error_str:
                    logger.error("=" * 80)
                    logger.error("CRITICAL: CUDA device-side assertion triggered!")
                    logger.error("=" * 80)
                    logger.error("The CUDA context has been corrupted and cannot be recovered in this process.")
                    logger.error("")
                    logger.error("SOLUTION: Please restart training with ResNet50 backbone:")
                    logger.error("")
                    logger.error("1. Edit src/config.py:")
                    logger.error("   IMPROVED_MODEL_CONFIG = {")
                    logger.error("       'backbone': 'resnet50',  # Change from 'overlock' to 'resnet50'")
                    logger.error("       ...")
                    logger.error("   }")
                    logger.error("")
                    logger.error("2. Or run with environment variable:")
                    logger.error("   export PRISM_BACKBONE=resnet50")
                    logger.error("   python main.py train-hp ...")
                    logger.error("")
                    logger.error("ResNet50 provides similar performance without the CUDA issues.")
                    logger.error("=" * 80)
                    raise RuntimeError(
                        "CUDA context corrupted by device-side assertion. "
                        "Please restart with backbone='resnet50' in config. "
                        "See error log above for instructions."
                    )

                # For other errors, try fallback
                logger.warning("⚠️  Falling back to ResNet50...")
                logger.warning("     To avoid this error in future, set backbone='resnet50' in config")

                self.model = SimpleROIRefinerModel(
                    device=DEVICE,
                    unfreeze_layers=self.config.get('unfreeze_layers', 2),
                    config=self.model_config
                )

        self.model.to(DEVICE)
        logger.info(f"Model moved to {DEVICE}")

        # Count parameters
        total_params = sum(p.numel() for p in self.model.parameters())
        trainable_params = sum(p.numel() for p in self.model.parameters() if p.requires_grad)
        logger.info(f"Total parameters: {total_params:,}")
        logger.info(f"Trainable parameters: {trainable_params:,}")

    def _setup_training(self) -> None:
        """Setup loss functions, optimizer, and schedulers.

        Configures OHEM+Focal or plain cross-entropy for classification,
        Balanced L1 for regression, AdamW, warmup+cosine LR schedule, AMP
        GradScaler, optional early stopping, and optional EMA/SWA.
        """
        logger.info("Setting up training components...")

        # Loss functions: OHEM+Focal only when BOTH flags are set; otherwise
        # fall back to plain cross-entropy.
        if self.config.get('use_ohem') and self.config.get('use_focal_loss'):
            self.cls_criterion = OHEMFocalLoss(
                alpha=self.config.get('focal_alpha', 0.25),
                gamma=self.config.get('focal_gamma', 2.0),
                ohem_ratio=self.config.get('ohem_ratio', 0.7)
            )
            logger.info("Using OHEM + Focal Loss for classification")
        else:
            self.cls_criterion = nn.CrossEntropyLoss()
            logger.info("Using CrossEntropyLoss for classification")

        self.reg_criterion = BalancedL1Loss()
        logger.info("Using Balanced L1 Loss for regression")

        # Optimizer
        self.optimizer = optim.AdamW(
            self.model.parameters(),
            lr=self.config.get('learning_rate', 1e-4),
            weight_decay=self.config.get('weight_decay', 1e-4)
        )
        logger.info(f"Optimizer: AdamW (lr={self.config.get('learning_rate', 1e-4)})")

        # Learning rate scheduler (linear warmup then cosine decay; stepped
        # once per epoch from train()).
        self.scheduler = WarmupCosineScheduler(
            self.optimizer,
            warmup_epochs=self.config.get('warmup_epochs', 5),
            total_epochs=self.config['epochs']
        )

        # Mixed precision scaler (pairs with autocast() in train_epoch).
        self.scaler = GradScaler()

        # Early stopping (validation-loss based; only meaningful with a val set).
        if self.use_validation:
            self.early_stopping = EarlyStopping(
                patience=self.config.get('early_stopping_patience', 15)
            )

        # EMA and SWA
        self.ema = None
        self.swa = None
        if self.config.get('use_ema'):
            self.ema = ModelEMA(self.model, decay=self.config.get('ema_decay', 0.9999))
            logger.info("EMA enabled")

        if self.config.get('use_swa'):
            # SWA averaging starts at swa_start_ratio of total epochs (default 75%).
            self.swa_start_epoch = int(self.config['epochs'] * self.config.get('swa_start_ratio', 0.75))
            self.swa = SWA(self.model)  # SWA.__init__ only takes model parameter
            logger.info(f"SWA enabled (start epoch: {self.swa_start_epoch})")
        else:
            self.swa_start_epoch = None

    def train_epoch(self, epoch: int) -> Dict[str, float]:
        """Train for one epoch.

        Returns:
            Dict with epoch-average 'loss', 'cls_loss', 'reg_loss' and
            'accuracy' (percentage over all seen samples).
        """
        self.model.train()

        total_loss = 0.0
        total_cls_loss = 0.0
        total_reg_loss = 0.0
        correct = 0
        total = 0

        pbar = tqdm(self.train_loader, desc=f"Epoch {epoch+1}/{self.config['epochs']}")

        # roi_positions: per-ROI position info from ROIDataset — presumably
        # normalized bbox coordinates; TODO confirm against src/dataset.py.
        for batch_idx, (roi_imgs, cls_labels, reg_targets, roi_positions) in enumerate(pbar):
            # Move to device
            roi_imgs = roi_imgs.to(DEVICE)
            cls_labels = cls_labels.to(DEVICE)
            reg_targets = reg_targets.to(DEVICE)
            roi_positions = roi_positions.to(DEVICE)

            # Forward pass with mixed precision
            with autocast():
                # Model returns (cls_logits, bbox_deltas, attention/extra) —
                # the third output is unused here.
                cls_logits, bbox_deltas, _ = self.model(roi_imgs, roi_positions)

                # Classification loss
                # Handle both OHEMFocalLoss (returns tuple) and CrossEntropyLoss (returns tensor)
                cls_loss_output = self.cls_criterion(cls_logits, cls_labels)
                if isinstance(cls_loss_output, tuple):
                    cls_loss, _ = cls_loss_output  # OHEMFocalLoss returns (loss, keep_mask)
                else:
                    cls_loss = cls_loss_output  # CrossEntropyLoss returns loss only

                # Regression loss (only for positive samples)
                # FIX: cls_labels are 0 (background) and 1-6 (foreground classes)
                # Only compute regression loss for foreground classes (> 0)
                pos_mask = cls_labels > 0  # Changed from >= 0 to > 0 to exclude background
                if pos_mask.sum() > 0:
                    # Select class-specific bbox deltas
                    pos_cls = cls_labels[pos_mask]  # Values: 1, 2, 3, 4, 5, 6
                    pos_deltas = bbox_deltas[pos_mask]

                    # Reshape: [N_pos, num_classes*4] -> [N_pos, num_classes, 4]
                    pos_deltas = pos_deltas.view(-1, self.num_classes, 4)

                    # Convert class labels to 0-indexed for regressor
                    # pos_cls is in range [1, num_classes], need [0, num_classes-1]
                    pos_cls_idx = pos_cls - 1

                    # Select deltas for ground truth class: [N_pos, 4]
                    selected_deltas = pos_deltas[torch.arange(pos_cls_idx.size(0)), pos_cls_idx]

                    # Compute regression loss
                    pos_reg_targets = reg_targets[pos_mask]
                    reg_loss = self.reg_criterion(selected_deltas, pos_reg_targets)
                else:
                    # No foreground ROIs in this batch: regression contributes zero.
                    reg_loss = torch.tensor(0.0, device=DEVICE)

                # Total loss
                loss = cls_loss + self.bbox_loss_weight * reg_loss

            # Backward pass
            self.optimizer.zero_grad()
            self.scaler.scale(loss).backward()

            # Gradient clipping (requires unscaling first so the clip norm
            # operates on true gradient magnitudes).
            if self.config.get('gradient_clip'):
                self.scaler.unscale_(self.optimizer)
                torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.config['gradient_clip'])

            self.scaler.step(self.optimizer)
            self.scaler.update()

            # Update EMA
            if self.ema:
                self.ema.update(self.model)

            # Statistics
            total_loss += loss.item()
            total_cls_loss += cls_loss.item()
            total_reg_loss += reg_loss.item()

            _, predicted = cls_logits.max(1)
            total += cls_labels.size(0)
            correct += predicted.eq(cls_labels).sum().item()

            # Update progress bar
            pbar.set_postfix({
                'loss': f'{loss.item():.4f}',
                'cls': f'{cls_loss.item():.4f}',
                'reg': f'{reg_loss.item():.4f}',
                'acc': f'{100.*correct/total:.2f}%'
            })

        # Epoch statistics
        avg_loss = total_loss / len(self.train_loader)
        avg_cls_loss = total_cls_loss / len(self.train_loader)
        avg_reg_loss = total_reg_loss / len(self.train_loader)
        accuracy = 100. * correct / total

        return {
            'loss': avg_loss,
            'cls_loss': avg_cls_loss,
            'reg_loss': avg_reg_loss,
            'accuracy': accuracy
        }

    @torch.no_grad()
    def validate(self) -> Dict[str, float]:
        """Validate the model.

        Mirrors the train-loop loss computation (without AMP/backward) and
        returns the same metrics dict as train_epoch.
        """
        self.model.eval()

        total_loss = 0.0
        total_cls_loss = 0.0
        total_reg_loss = 0.0
        correct = 0
        total = 0

        for roi_imgs, cls_labels, reg_targets, roi_positions in tqdm(self.val_loader, desc="Validation"):
            roi_imgs = roi_imgs.to(DEVICE)
            cls_labels = cls_labels.to(DEVICE)
            reg_targets = reg_targets.to(DEVICE)
            roi_positions = roi_positions.to(DEVICE)

            # Forward pass
            cls_logits, bbox_deltas, _ = self.model(roi_imgs, roi_positions)

            # Losses
            # Handle both OHEMFocalLoss (returns tuple) and CrossEntropyLoss (returns tensor)
            cls_loss_output = self.cls_criterion(cls_logits, cls_labels)
            if isinstance(cls_loss_output, tuple):
                cls_loss, _ = cls_loss_output  # OHEMFocalLoss returns (loss, keep_mask)
            else:
                cls_loss = cls_loss_output  # CrossEntropyLoss returns loss only

            # FIX: Same as train loop - exclude background class (0)
            pos_mask = cls_labels > 0
            if pos_mask.sum() > 0:
                pos_cls = cls_labels[pos_mask]
                pos_deltas = bbox_deltas[pos_mask].view(-1, self.num_classes, 4)
                # Convert class labels to 0-indexed for regressor
                pos_cls_idx = pos_cls - 1
                selected_deltas = pos_deltas[torch.arange(pos_cls_idx.size(0)), pos_cls_idx]
                pos_reg_targets = reg_targets[pos_mask]
                reg_loss = self.reg_criterion(selected_deltas, pos_reg_targets)
            else:
                reg_loss = torch.tensor(0.0, device=DEVICE)

            loss = cls_loss + self.bbox_loss_weight * reg_loss

            # Statistics
            total_loss += loss.item()
            total_cls_loss += cls_loss.item()
            total_reg_loss += reg_loss.item()

            _, predicted = cls_logits.max(1)
            total += cls_labels.size(0)
            correct += predicted.eq(cls_labels).sum().item()

        avg_loss = total_loss / len(self.val_loader)
        avg_cls_loss = total_cls_loss / len(self.val_loader)
        avg_reg_loss = total_reg_loss / len(self.val_loader)
        accuracy = 100. * correct / total

        return {
            'loss': avg_loss,
            'cls_loss': avg_cls_loss,
            'reg_loss': avg_reg_loss,
            'accuracy': accuracy
        }

    def train(self) -> None:
        """Main training loop.

        Per epoch: step the LR schedule, train, optionally validate (saving
        best-loss / best-accuracy checkpoints and checking early stopping),
        update SWA after its start epoch, and checkpoint every 10 epochs.
        Finishes by saving 'final' (plus EMA/SWA variants when enabled).
        """
        logger.info("=" * 80)
        logger.info("Starting training...")
        logger.info("=" * 80)

        best_val_loss = float('inf')
        best_val_acc = 0.0

        for epoch in range(self.config['epochs']):
            # Update learning rate
            current_lr = self.scheduler.step(epoch)
            logger.info(f"\nEpoch {epoch+1}/{self.config['epochs']} - LR: {current_lr:.6f}")

            # Train
            train_metrics = self.train_epoch(epoch)
            logger.info(f"Train - Loss: {train_metrics['loss']:.4f}, Acc: {train_metrics['accuracy']:.2f}%")

            # Validate
            if self.use_validation:
                val_metrics = self.validate()
                logger.info(f"Val   - Loss: {val_metrics['loss']:.4f}, Acc: {val_metrics['accuracy']:.2f}%")

                # Save best model (tracked separately for loss and accuracy).
                if val_metrics['loss'] < best_val_loss:
                    best_val_loss = val_metrics['loss']
                    self.save_checkpoint('best_val_loss')
                    logger.info(f"✓ Best validation loss: {best_val_loss:.4f}")

                if val_metrics['accuracy'] > best_val_acc:
                    best_val_acc = val_metrics['accuracy']
                    self.save_checkpoint('best_val_acc')
                    logger.info(f"✓ Best validation accuracy: {best_val_acc:.2f}%")

                # Early stopping
                if self.early_stopping(val_metrics['loss']):
                    logger.info("Early stopping triggered")
                    break

            # Update SWA (only after swa_start_epoch; swa_start_epoch is
            # always set when self.swa is, so the comparison is safe).
            if self.swa and epoch >= self.swa_start_epoch:
                self.swa.update(self.model)

            # Save checkpoint every 10 epochs
            if (epoch + 1) % 10 == 0:
                self.save_checkpoint(f'epoch_{epoch+1}')

        # Final saves
        logger.info("\nTraining completed!")
        self.save_checkpoint('final')

        if self.ema:
            self.save_checkpoint('final_ema', use_ema=True)

        if self.swa:
            self.save_checkpoint('final_swa', use_swa=True)

        logger.info("=" * 80)

    def save_checkpoint(self, name: str, use_ema: bool = False, use_swa: bool = False) -> None:
        """Save model checkpoint with unified naming: stage2_{name}.pth

        Args:
            name: Checkpoint tag (e.g. 'final', 'best_val_loss', 'epoch_10').
            use_ema: Save the EMA-averaged weights (suffix '_ema').
            use_swa: Save the SWA-averaged weights (suffix '_swa').

        The saved dict contains 'model_state_dict', 'config' and
        'model_config' so the checkpoint is self-describing.
        """
        save_dir = Path(WEIGHTS_DIR)
        # NOTE(review): mkdir(exist_ok=True) assumes the parent of WEIGHTS_DIR
        # already exists — confirm, or parents=True may be needed.
        save_dir.mkdir(exist_ok=True)

        if use_ema and self.ema:
            state_dict = self.ema.ema_model.state_dict()
            save_path = save_dir / f'stage2_{name}_ema.pth'  # unified naming
        elif use_swa and self.swa:
            state_dict = self.swa.get_averaged_model().state_dict()
            save_path = save_dir / f'stage2_{name}_swa.pth'  # unified naming
        else:
            state_dict = self.model.state_dict()
            save_path = save_dir / f'stage2_{name}.pth'  # unified naming

        torch.save({
            'model_state_dict': state_dict,
            'config': self.config,
            'model_config': self.model_config
        }, save_path)

        logger.info(f"Checkpoint saved: {save_path}")


def run_training_stage2_improved(config=None, model_config=None):
    """
    Run improved Stage 2 training with validation enabled.

    Args:
        config: Training configuration (uses STAGE2_CONFIG if None)
        model_config: Model configuration (uses IMPROVED_MODEL_CONFIG if None)
    """
    # Resolve explicit None against the project-level defaults.
    effective_config = STAGE2_CONFIG if config is None else config
    effective_model_config = IMPROVED_MODEL_CONFIG if model_config is None else model_config

    ImprovedStage2Trainer(effective_config, effective_model_config, use_validation=True).train()


# Backward compatibility alias for train_stage2_unified
def run_training_stage2_unified(use_advanced=True, use_validation=True, config=None, model_config=None):
    """
    Backward compatibility wrapper for train_stage2_unified.

    This function provides the same interface as the old train_stage2_unified script
    but uses the improved training pipeline internally.

    Args:
        use_advanced: Whether to use advanced features (ignored, always uses advanced)
        use_validation: Whether to use validation set
        config: Training configuration (uses STAGE2_CONFIG if None)
        model_config: Model configuration (uses IMPROVED_MODEL_CONFIG if None)
    """
    # The legacy "basic" mode no longer exists; warn callers still passing it.
    if not use_advanced:
        logger.warning("⚠️  use_advanced=False is deprecated. Always using advanced features.")
        logger.warning("     The old basic mode is no longer supported.")

    # Resolve explicit None against the project-level defaults.
    effective_config = STAGE2_CONFIG if config is None else config
    effective_model_config = IMPROVED_MODEL_CONFIG if model_config is None else model_config

    ImprovedStage2Trainer(effective_config, effective_model_config, use_validation=use_validation).train()


# Default alias: `run_training_stage2` resolves to the improved pipeline.
run_training_stage2 = run_training_stage2_improved


if __name__ == '__main__':
    run_training_stage2_improved()
