#!/usr/bin/env python
"""
Debug script to check CUDA state at each stage of training.
"""

import logging
import sys
import traceback

import torch

# Console-only logging; every check below reports through this module logger.
logging.basicConfig(level=logging.INFO, format='[%(levelname)s] %(message)s')
logger = logging.getLogger(__name__)


def check_cuda_state(stage_name):
    """Probe CUDA health and report the findings via the module logger.

    Runs a minimal allocate-and-compute smoke test on the default CUDA
    device and logs allocator statistics, so the caller can pinpoint the
    exact stage at which CUDA state becomes corrupted.

    Args:
        stage_name: Human-readable label for this checkpoint, printed in
            the log banner.

    Returns:
        bool: True if CUDA is available and the smoke test succeeds,
        False otherwise. Failures are logged, never raised.
    """
    logger.info("=" * 80)
    logger.info("CUDA State Check: %s", stage_name)
    logger.info("=" * 80)

    try:
        # Check CUDA availability
        cuda_available = torch.cuda.is_available()
        logger.info("  CUDA available: %s", cuda_available)

        if not cuda_available:
            logger.error("  CUDA not available!")
            return False

        device = torch.device('cuda')
        try:
            # Allocation and a trivial kernel launch are the two cheapest
            # operations that surface a corrupted CUDA context.
            test_tensor = torch.zeros(1, device=device)
            logger.info("  ✓ Can create tensors on CUDA")

            result = test_tensor + 1
            logger.info("  ✓ Can perform operations on CUDA")

            # Allocator statistics, reported in MB.
            allocated = torch.cuda.memory_allocated() / (1024 ** 2)
            reserved = torch.cuda.memory_reserved() / (1024 ** 2)
            logger.info("  Memory allocated: %.1f MB", allocated)
            logger.info("  Memory reserved: %.1f MB", reserved)

            # Clean up so this probe doesn't perturb later measurements.
            del test_tensor, result
            torch.cuda.empty_cache()

            logger.info("  ✓ CUDA state is healthy")
            return True

        except Exception as e:
            # Deliberately broad: any CUDA failure mode should be reported
            # here rather than propagated, so later checks can still run.
            logger.error("  ✗ CUDA Error: %s", e)
            return False

    except Exception as e:
        logger.error("  ✗ Fatal error checking CUDA: %s", e)
        return False


def _require_healthy_cuda(stage_name, failure_message):
    """Check CUDA at a checkpoint; log *failure_message* and exit(1) on failure."""
    if not check_cuda_state(stage_name):
        logger.error(failure_message)
        sys.exit(1)


def _test_data_loading():
    """Test 1: build the Stage-2 dataset/dataloader and pull one batch."""
    logger.info("Test 1: Setting up data loaders...")
    try:
        from src.training.train_stage2 import ROIDataset
        from src.config import STAGE2_CONFIG
        from torchvision import transforms
        from torch.utils.data import DataLoader

        # Standard ImageNet preprocessing (matches the backbone's pretraining).
        transform = transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ])

        dataset = ROIDataset(STAGE2_CONFIG['proposals_json'], transform=transform)
        logger.info(f"  Dataset created: {len(dataset)} samples")

        dataloader = DataLoader(
            dataset,
            batch_size=8,
            shuffle=False,
            num_workers=0,  # Use 0 to avoid multiprocessing issues
            pin_memory=True
        )
        logger.info("  DataLoader created")

        # Loading one batch exercises the full dataset __getitem__ path.
        batch = next(iter(dataloader))
        logger.info("  ✓ Successfully loaded one batch")
        logger.info(f"    Batch shapes: images={batch[0].shape}, labels={batch[1].shape}")

        del dataset, dataloader, batch

    except Exception as e:
        logger.error(f"  ✗ Data loading failed: {e}")
        traceback.print_exc()


def _test_overlock():
    """Test 2: create OverLock on CPU, move it to CUDA, run one forward pass."""
    logger.info("Test 2: Initializing OverLock...")
    try:
        sys.path.append('/root/PRISM-PRI/models_ext')
        from overlock_local import overlock_t

        logger.info("  Creating OverLock on CPU...")
        model = overlock_t()
        logger.info("  ✓ OverLock created on CPU")

        logger.info("  Moving OverLock to CUDA...")
        model = model.to('cuda')
        logger.info("  ✓ OverLock moved to CUDA")

        logger.info("  Testing forward pass...")
        test_input = torch.randn(1, 3, 224, 224, device='cuda')
        with torch.no_grad():
            output = model(test_input)
        logger.info(f"  ✓ Forward pass successful: {output.shape}")

        del model, test_input, output
        torch.cuda.empty_cache()

    except Exception as e:
        logger.error(f"  ✗ OverLock initialization failed: {e}")
        traceback.print_exc()


def _test_dinov2():
    """Test 3: load DINOv2 via torch.hub and run one forward pass on CUDA."""
    logger.info("Test 3: Initializing DINOv2...")
    try:
        import torch.hub

        logger.info("  Loading DINOv2 model...")
        dino = torch.hub.load('facebookresearch/dinov2', 'dinov2_vits14', pretrained=True)
        dino = dino.to('cuda')
        dino.eval()
        logger.info("  ✓ DINOv2 loaded and moved to CUDA")

        logger.info("  Testing forward pass...")
        test_input = torch.randn(1, 3, 224, 224, device='cuda')
        with torch.no_grad():
            output = dino(test_input)
        logger.info(f"  ✓ Forward pass successful: {output.shape}")

        del dino, test_input, output
        torch.cuda.empty_cache()

    except Exception as e:
        logger.error(f"  ✗ DINOv2 initialization failed: {e}")
        traceback.print_exc()


def _test_refiner_model():
    """Test 4: build the full ImprovedROIRefinerModel and run a batch through it."""
    logger.info("Test 4: Initializing full ImprovedROIRefinerModel...")
    try:
        from src.models.refiner_improved import ImprovedROIRefinerModel
        from src.config import IMPROVED_MODEL_CONFIG

        logger.info("  Creating ImprovedROIRefinerModel...")
        model = ImprovedROIRefinerModel(
            device='cuda',
            unfreeze_layers=2,
            config=IMPROVED_MODEL_CONFIG
        )
        logger.info("  ✓ Model created")

        logger.info("  Testing forward pass...")
        test_input = torch.randn(8, 3, 224, 224, device='cuda')
        with torch.no_grad():
            cls_out, bbox_out = model(test_input)
        logger.info("  ✓ Forward pass successful")
        logger.info(f"    Classification output: {cls_out.shape}")
        logger.info(f"    Bbox output: {bbox_out.shape}")

        del model, test_input, cls_out, bbox_out
        torch.cuda.empty_cache()

    except Exception as e:
        logger.error(f"  ✗ ImprovedROIRefinerModel initialization failed: {e}")
        traceback.print_exc()


def _test_trainer():
    """Test 5: construct the full ImprovedStage2Trainer pipeline."""
    logger.info("Test 5: Creating ImprovedStage2Trainer (full pipeline)...")
    try:
        from src.training.train_stage2 import ImprovedStage2Trainer
        from src.config import STAGE2_CONFIG, IMPROVED_MODEL_CONFIG

        logger.info("  Creating trainer...")
        trainer = ImprovedStage2Trainer(
            config=STAGE2_CONFIG,
            model_config=IMPROVED_MODEL_CONFIG,
            use_validation=False
        )
        logger.info("  ✓ Trainer created successfully")

        del trainer
        torch.cuda.empty_cache()

    except Exception as e:
        logger.error(f"  ✗ Trainer creation failed: {e}")
        traceback.print_exc()


def main():
    """Run a staged CUDA health sweep.

    Initializes each training component in turn (data loaders, OverLock,
    DINOv2, the full refiner model, the trainer) and probes CUDA state
    after every stage, exiting with status 1 at the first checkpoint
    where CUDA is no longer usable.
    """
    logger.info("Starting CUDA state monitoring during training...")
    logger.info("")

    # Baseline: CUDA must be healthy before anything is initialized.
    _require_healthy_cuda("Initial State", "CUDA not working at startup!")

    logger.info("")
    logger.info("Now testing each training component initialization...")
    logger.info("")

    _test_data_loading()
    _require_healthy_cuda("After Data Loading",
                          "CUDA corrupted after data loading!")

    logger.info("")
    _test_overlock()
    _require_healthy_cuda("After OverLock Initialization",
                          "CUDA corrupted after OverLock initialization!")

    logger.info("")
    _test_dinov2()
    _require_healthy_cuda("After DINOv2 Initialization",
                          "CUDA corrupted after DINOv2 initialization!")

    logger.info("")
    _test_refiner_model()
    _require_healthy_cuda("After ImprovedROIRefinerModel Initialization",
                          "CUDA corrupted after model initialization!")

    logger.info("")
    _test_trainer()
    _require_healthy_cuda("After Trainer Creation",
                          "CUDA corrupted after trainer creation!")

    logger.info("")
    logger.info("=" * 80)
    logger.info("✓ ALL TESTS PASSED - CUDA state remains healthy throughout!")
    logger.info("=" * 80)
    logger.info("")
    logger.info("Conclusion: The issue must be in the specific training context.")
    logger.info("Possible causes:")
    logger.info("  1. Memory pressure from previous stages")
    logger.info("  2. Interaction with DataLoader multiprocessing (num_workers)")
    logger.info("  3. State corruption from Stage 1 or proposal generation")
    logger.info("  4. Environmental factors (other processes using GPU)")


# Script entry point: run the full CUDA health sweep.
if __name__ == '__main__':
    main()
