"""Stage 2 diagnostic tools."""
import json
import logging
import sys
from pathlib import Path
from typing import Optional
from collections import Counter, defaultdict

import torch
import yaml
import numpy as np
from tqdm import tqdm
from PIL import Image
from torchvision import transforms

from src.utils.bbox_utils import compute_iou

logger = logging.getLogger(__name__)


def diagnose_stage2(proposals_json: Optional[str] = None,
                    weights_path: Optional[str] = None,
                    data_yaml: str = 'dataset/data.yaml') -> None:
    """
    Basic Stage 2 diagnostics.

    Runs five checks and reports results through the module logger:
    (1) proposals file exists and is non-empty, (2) the refiner model
    initializes and has trainable parameters, (3) a dummy forward pass
    produces finite outputs, (4) trained weights load, and (5) the ROI
    dataset loads with a reasonable sample-type distribution. Returns
    early on fatal problems.

    Args:
        proposals_json: Path to proposals JSON file; defaults to
            STAGE2_CONFIG['proposals_json'].
        weights_path: Path to Stage 2 weights; defaults to
            STAGE2_CONFIG['weights_path'].
        data_yaml: Path to data.yaml configuration (read for 'nc').
    """
    from src.config import STAGE2_CONFIG, DEVICE
    from src.models.refiner import ROIRefinerModel
    from src.dataset import ROIDataset

    proposals_json = proposals_json or STAGE2_CONFIG['proposals_json']
    weights_path = weights_path or STAGE2_CONFIG['weights_path']

    logger.info("=" * 80)
    logger.info("Stage 2 Diagnostics")
    logger.info("=" * 80)

    # 1. Check proposals file
    logger.info("\n[1/5] Checking proposals file...")
    if not Path(proposals_json).exists():
        logger.error("❌ proposals.json does not exist!")
        logger.error("   Please run: python main.py gen-proposals")
        return

    with open(proposals_json) as f:
        proposals = json.load(f)

    # Bug fix: an empty proposals list previously caused ZeroDivisionError
    # in the per-image average below.
    if not proposals:
        logger.error("❌ proposals.json contains no entries!")
        return

    total_rois = sum(len(item['rois']) for item in proposals)
    logger.info("✅ Proposals file exists")
    logger.info(f"   Total proposals: {total_rois}")
    logger.info(f"   Average per image: {total_rois / len(proposals):.1f}")

    # Get number of classes (needed by the dataset check in step 5)
    with open(data_yaml) as f:
        config = yaml.safe_load(f)
        num_classes = config['nc']

    # 2. Check model initialization
    logger.info("\n[2/5] Checking model initialization...")
    try:
        model = ROIRefinerModel(device=DEVICE)
        logger.info("✅ Model initialized successfully")

        total_params = sum(p.numel() for p in model.parameters())
        trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)

        logger.info(f"   Total parameters: {total_params:,}")
        logger.info(f"   Trainable parameters: {trainable_params:,}")
        logger.info(f"   Frozen parameters: {total_params - trainable_params:,}")

        if trainable_params == 0:
            logger.error("   ❌ All parameters are frozen! Cannot train")
        elif trainable_params < 1_000_000:
            logger.warning("   ⚠️ Too few trainable parameters, may affect training")
        else:
            logger.info("   ✅ Reasonable number of trainable parameters")

    except Exception as e:
        logger.error(f"❌ Model initialization failed: {e}")
        import traceback
        traceback.print_exc()
        return

    # 3. Test forward pass
    logger.info("\n[3/5] Testing forward pass...")
    try:
        # Use the device the model actually ended up on.
        device = next(model.parameters()).device
        dummy_input = torch.randn(2, 3, 224, 224).to(device)

        with torch.no_grad():
            cls_logits, bbox_deltas = model(dummy_input)

        logger.info("✅ Forward pass successful")
        logger.info(f"   Classification output shape: {cls_logits.shape}")
        logger.info(f"   Regression output shape: {bbox_deltas.shape}")
        logger.info(f"   Classification logits range: [{cls_logits.min():.2f}, {cls_logits.max():.2f}]")
        logger.info(f"   Regression deltas range: [{bbox_deltas.min():.2f}, {bbox_deltas.max():.2f}]")

        if torch.isnan(cls_logits).any() or torch.isnan(bbox_deltas).any():
            logger.error("   ❌ Output contains NaN!")
        elif torch.isinf(cls_logits).any() or torch.isinf(bbox_deltas).any():
            logger.error("   ❌ Output contains Inf!")
        else:
            logger.info("   ✅ Output is normal")

    except Exception as e:
        logger.error(f"❌ Forward pass failed: {e}")
        import traceback
        traceback.print_exc()

    # 4. Check training weights
    logger.info("\n[4/5] Checking training weights...")

    if not Path(weights_path).exists():
        logger.warning(f"⚠️ Weights file does not exist: {weights_path}")
        logger.info("   Model may not have been trained")
    else:
        try:
            # weights_only=False: checkpoint may contain non-tensor metadata
            # (epoch, metrics); only load checkpoints you trust.
            checkpoint = torch.load(weights_path, map_location='cpu', weights_only=False)
            logger.info("✅ Weights file exists")

            if isinstance(checkpoint, dict):
                logger.info(f"   Keys included: {list(checkpoint.keys())}")

                if 'epoch' in checkpoint:
                    logger.info(f"   Training epochs: {checkpoint['epoch']}")

                if 'metrics' in checkpoint:
                    logger.info(f"   Saved metrics: {checkpoint['metrics']}")

            # Bug fix: the membership test used to run on non-dict checkpoints
            # too, raising TypeError instead of attempting a direct load.
            if isinstance(checkpoint, dict) and 'model_state_dict' in checkpoint:
                model.load_state_dict(checkpoint['model_state_dict'])
                logger.info("   ✅ Weights loaded successfully")
            else:
                model.load_state_dict(checkpoint)
                logger.info("   ✅ Weights loaded successfully")

        except Exception as e:
            logger.error(f"   ❌ Weights loading failed: {e}")

    # 5. Check training data distribution
    logger.info("\n[5/5] Checking training data...")
    try:
        dataset = ROIDataset(
            proposals_file=proposals_json,
            num_classes=num_classes
        )

        logger.info("✅ Dataset loaded successfully")
        logger.info(f"   Total samples: {len(dataset)}")

        if len(dataset) == 0:
            logger.error("❌ Dataset is empty!")
        else:
            # Test loading one sample
            try:
                sample = dataset[0]

                if 'roi_img' not in sample or 'label' not in sample or 'bbox_target' not in sample:
                    logger.error("❌ Dataset sample missing required fields")
                else:
                    logger.info("   Sample fields:")
                    logger.info(f"      roi_img: {sample['roi_img'].shape}")
                    logger.info(f"      label: {sample['label']}")
                    logger.info(f"      bbox_target: {sample['bbox_target'].shape}")

            except Exception as e:
                logger.error(f"❌ Failed to load sample: {e}")

            # Sample statistics: label == num_classes is the background
            # class, -1 marks ignored samples, everything else is positive.
            logger.info("   Analyzing first 1000 samples...")
            sample_types = {'positive': 0, 'negative': 0, 'ignore': 0}

            total_checked = min(1000, len(dataset))
            for i in range(total_checked):
                label = dataset[i]['label']
                if label == num_classes:
                    sample_types['negative'] += 1
                elif label == -1:
                    sample_types['ignore'] += 1
                else:
                    sample_types['positive'] += 1

            logger.info(f"   Sample type distribution (first {total_checked} samples):")
            logger.info(f"      Positive: {sample_types['positive']} ({sample_types['positive'] / total_checked * 100:.1f}%)")
            logger.info(f"      Negative: {sample_types['negative']} ({sample_types['negative'] / total_checked * 100:.1f}%)")
            logger.info(f"      Ignore: {sample_types['ignore']} ({sample_types['ignore'] / total_checked * 100:.1f}%)")

            if sample_types['positive'] < total_checked * 0.3:
                logger.warning("⚠️ Low positive sample ratio (<30%), may be difficult to train")

            if sample_types['negative'] < total_checked * 0.05:
                logger.warning("⚠️ Too few negative samples (<5%), may overfit")

    except Exception as e:
        logger.error(f"❌ Dataset check failed: {e}")
        import traceback
        traceback.print_exc()

    logger.info("\n" + "=" * 80)
    logger.info("Diagnostics complete!")
    logger.info("=" * 80)


def comprehensive_stage2_check(data_yaml: Optional[str] = None,
                               proposals_json: Optional[str] = None) -> int:
    """
    Comprehensive Stage 2 logic check.

    Validates configuration paths, data.yaml, proposals.json, and the
    ROI dataset class, collecting critical issues and warnings and
    logging a summary.

    Args:
        data_yaml: Path to data.yaml; falls back to the configured
            DATA_YAML when not given.
        proposals_json: Path to proposals JSON; falls back to
            STAGE2_CONFIG['proposals_json'] when not given.

    Returns:
        0 if no critical issues were found, 1 otherwise (warnings alone
        do not fail the check).
    """
    from src.config import STAGE2_CONFIG, DATA_YAML, DEVICE

    proposals_json = proposals_json or STAGE2_CONFIG['proposals_json']
    # Bug fix: the data_yaml argument used to be ignored in favor of the
    # configured DATA_YAML; honor it when the caller supplies a path.
    data_yaml = data_yaml or DATA_YAML

    logger.info("=" * 80)
    logger.info("Comprehensive Stage 2 Check")
    logger.info("=" * 80)

    issues = []
    warnings = []
    # Set by the data.yaml check; the dataset check depends on it.
    num_classes = None

    # Check configuration
    logger.info("\n[1/4] Checking configuration...")
    try:
        logger.info("  ✅ Configuration loaded")
        logger.info(f"    DATA_YAML: {data_yaml}")
        logger.info(f"    Proposals: {proposals_json}")
        logger.info(f"    Device: {DEVICE}")

        if not Path(data_yaml).exists():
            issues.append(f"data.yaml does not exist: {data_yaml}")

        if not Path(proposals_json).exists():
            issues.append(f"proposals.json does not exist: {proposals_json}")

    except Exception as e:
        issues.append(f"Configuration loading failed: {e}")

    # Check data.yaml
    logger.info("\n[2/4] Checking data.yaml...")
    try:
        with open(data_yaml) as f:
            data_config = yaml.safe_load(f)

        num_classes = data_config['nc']
        class_names = data_config['names']

        logger.info("  ✅ data.yaml parsed successfully")
        logger.info(f"    Number of classes: {num_classes}")
        logger.info(f"    Class names: {class_names}")

        if num_classes != len(class_names):
            issues.append(f"Class count mismatch: nc={num_classes}, len(names)={len(class_names)}")

    except Exception as e:
        issues.append(f"data.yaml parsing failed: {e}")

    # Check proposals.json
    logger.info("\n[3/4] Checking proposals.json...")
    try:
        with open(proposals_json) as f:
            proposals_data = json.load(f)

        logger.info("  ✅ proposals.json loaded successfully")
        logger.info(f"    Total entries: {len(proposals_data)}")

        if len(proposals_data) > 0:
            sample = proposals_data[0]
            required_keys = ['img_path', 'rois', 'labels']

            for key in required_keys:
                if key not in sample:
                    issues.append(f"proposals missing field: {key}")

            total_rois = sum(len(item['rois']) for item in proposals_data)
            total_gts = sum(len(item['labels']) for item in proposals_data)

            logger.info(f"    Total proposals: {total_rois}")
            logger.info(f"    Total GT: {total_gts}")
            logger.info(f"    Average proposals/image: {total_rois / len(proposals_data):.1f}")

            if total_rois / len(proposals_data) < 5:
                warnings.append("Too few proposals (<5/image), may affect training")

    except Exception as e:
        issues.append(f"proposals.json check failed: {e}")

    # Check dataset class
    logger.info("\n[4/4] Checking dataset class...")
    if num_classes is None:
        # Bug fix: this used to reference an unbound num_classes after a
        # data.yaml failure, masking the real problem behind a NameError.
        issues.append("Dataset check skipped: num_classes unavailable (data.yaml parsing failed)")
    else:
        try:
            from src.dataset import ROIDataset

            dataset = ROIDataset(
                proposals_json,
                num_classes=num_classes
            )

            logger.info("  ✅ Dataset created successfully")
            logger.info(f"    Sample count: {len(dataset)}")

            if len(dataset) == 0:
                issues.append("Dataset is empty!")

        except Exception as e:
            issues.append(f"Dataset check failed: {e}")

    # Summary
    logger.info("\n" + "=" * 80)
    logger.info("Check complete!")
    logger.info("=" * 80)

    if len(issues) == 0 and len(warnings) == 0:
        logger.info("\n✅✅✅ All checks passed! Ready to train!")
        return 0

    if len(issues) > 0:
        logger.error(f"\n❌ Found {len(issues)} critical issues:")
        for i, issue in enumerate(issues, 1):
            logger.error(f"  {i}. {issue}")
        logger.error("\n⚠️ Must fix these issues before training!")

    if len(warnings) > 0:
        logger.warning(f"\n⚠️ Found {len(warnings)} warnings:")
        for i, warning in enumerate(warnings, 1):
            logger.warning(f"  {i}. {warning}")
        logger.warning("\nCan continue training, but please pay attention to these issues.")

    logger.info("\n" + "=" * 80)

    return 1 if len(issues) > 0 else 0


def diagnose_model_collapse(data_yaml: str = 'dataset/data.yaml',
                            weights_path: Optional[str] = None,
                            device: Optional[str] = None,
                            num_test_images: int = 100) -> None:
    """
    Diagnose if model is collapsing (always predicting same class).

    Runs the Stage 1 proposer and Stage 2 refiner over a sample of test
    images, then analyzes the predicted-class distribution, confidence
    statistics, and bbox-delta magnitudes for signs of collapse.

    Args:
        data_yaml: Path to data.yaml (provides class names, 'nc', and
            the 'test' image directory).
        weights_path: Path to Stage 2 weights; defaults to
            STAGE2_CONFIG['weights_path'].
        device: Device to use; defaults to the configured DEVICE.
            (The old 'cuda' default made the DEVICE fallback dead code.)
        num_test_images: Number of test images to analyze.
    """
    from src.config import STAGE2_CONFIG, DEVICE
    from src.models.proposer import YOLOProposer
    from src.models.refiner import ROIRefinerModel

    weights_path = weights_path or STAGE2_CONFIG['weights_path']
    device = device or DEVICE

    logger.info("=" * 80)
    logger.info("Model Collapse Diagnostics")
    logger.info("=" * 80)

    # Load models
    with open(data_yaml) as f:
        data = yaml.safe_load(f)
        class_names = data['names']
        num_classes = data['nc']

    # NOTE(review): Stage 1 weights path is hard-coded here — consider
    # pulling it from config like weights_path above.
    proposer = YOLOProposer('weights/stage1_proposer.pt', device)
    refiner = ROIRefinerModel(device=device)

    checkpoint = torch.load(weights_path, map_location=device, weights_only=False)
    # Support both full checkpoints and bare state dicts.
    refiner.load_state_dict(checkpoint.get('model_state_dict', checkpoint))
    refiner.eval()

    # Test set: 'test' in data.yaml is relative to the yaml's directory.
    test_dir = Path(data_yaml).parent / data['test']
    test_images = list(test_dir.glob('*.jpg')) + list(test_dir.glob('*.png'))

    transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        # Standard ImageNet normalization statistics.
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])

    # Collect predictions
    all_predictions = []
    all_confidences = []
    bbox_deltas_stats = []

    logger.info("\nCollecting predictions...")

    for img_path in tqdm(test_images[:num_test_images], desc="Processing", ncols=80):
        # Very low conf_thresh so nearly every proposal is kept.
        rois = proposer.propose(str(img_path), conf_thresh=0.001, iou_thresh=0.5)

        if len(rois) == 0:
            continue

        full_image = Image.open(img_path).convert("RGB")
        roi_batch = []

        for box in rois:
            # Clamp the box to the image bounds before cropping.
            x1, y1, x2, y2 = map(int, box)
            x1, y1 = max(0, x1), max(0, y1)
            x2, y2 = min(full_image.width, x2), min(full_image.height, y2)

            if x2 > x1 and y2 > y1:
                roi_img = full_image.crop((x1, y1, x2, y2))
                roi_batch.append(transform(roi_img))

        if len(roi_batch) == 0:
            continue

        roi_tensors = torch.stack(roi_batch).to(device)

        with torch.no_grad():
            class_logits, bbox_deltas = refiner(roi_tensors)

        scores = torch.softmax(class_logits, dim=1)
        max_probs, pred_classes = torch.max(scores, dim=1)

        for i in range(len(pred_classes)):
            all_predictions.append(pred_classes[i].item())
            all_confidences.append(max_probs[i].item())
            bbox_deltas_stats.append(bbox_deltas[i].abs().mean().item())

    logger.info(f"\nCollected {len(all_predictions)} predictions")

    # Bug fix: with zero predictions the code below raised
    # ZeroDivisionError / IndexError / ValueError (most_common, min/max).
    if not all_predictions:
        logger.error("❌ No predictions collected - the proposer produced no valid ROIs")
        return

    # Analyze prediction distribution
    pred_counter = Counter(all_predictions)

    logger.info(f"\n{'=' * 80}")
    logger.info("Prediction Class Distribution")
    logger.info(f"{'=' * 80}")

    for cls_id in sorted(pred_counter.keys()):
        count = pred_counter[cls_id]
        pct = count / len(all_predictions) * 100

        # Class id num_classes is the background class.
        if cls_id == num_classes:
            cls_name = "Background"
        else:
            cls_name = class_names[cls_id]

        bar = "█" * int(pct / 2)
        logger.info(f"{cls_name:15s}: {count:5d} ({pct:5.1f}%) {bar}")

    # Check for collapse
    most_common_cls, most_common_count = pred_counter.most_common(1)[0]
    most_common_pct = most_common_count / len(all_predictions)

    logger.info(f"\n{'=' * 80}")
    logger.info("Collapse Analysis")
    logger.info(f"{'=' * 80}")

    if most_common_pct > 0.8:
        logger.error(f"❌ Severe model collapse!")
        logger.error(f"   {most_common_pct * 100:.1f}% of predictions are the same class")
        if most_common_cls == num_classes:
            logger.info(f"   Class: Background")
            logger.info(f"\nReason: Model learned to treat everything as background")
        else:
            logger.info(f"   Class: {class_names[most_common_cls]}")
            logger.info(f"\nReason: Severely imbalanced training data")
    elif most_common_pct > 0.5:
        logger.warning(f"⚠️ Partial model collapse")
        logger.warning(f"   {most_common_pct * 100:.1f}% of predictions concentrated in one class")
    else:
        logger.info(f"✅ Prediction distribution is normal")

    # Confidence analysis
    logger.info(f"\n{'=' * 80}")
    logger.info("Confidence Analysis")
    logger.info(f"{'=' * 80}")

    logger.info(f"Average confidence: {np.mean(all_confidences):.4f}")
    logger.info(f"Median confidence: {np.median(all_confidences):.4f}")
    logger.info(f"Min confidence: {min(all_confidences):.4f}")
    logger.info(f"Max confidence: {max(all_confidences):.4f}")

    # Bbox regression analysis
    logger.info(f"\n{'=' * 80}")
    logger.info("Bounding Box Regression Analysis")
    logger.info(f"{'=' * 80}")

    logger.info(f"Average absolute delta: {np.mean(bbox_deltas_stats):.4f}")
    logger.info(f"Median delta: {np.median(bbox_deltas_stats):.4f}")

    if np.mean(bbox_deltas_stats) < 0.01:
        logger.error(f"\n❌ Bbox regression barely working!")
        logger.error(f"   Delta close to 0, model did not learn regression")
    elif np.mean(bbox_deltas_stats) > 2.0:
        logger.error(f"\n❌ Bbox regression unstable!")
        logger.error(f"   Delta too large, possible gradient explosion")
    else:
        logger.info(f"\n✅ Bbox regression is normal")


def diagnose_bbox_regression(proposals_json: str = 'proposals/proposals.json',
                             num_images: int = 500) -> None:
    """
    Diagnose bounding box regression issues.

    Matches each Stage 1 proposal against its image's ground-truth boxes
    (best IoU per proposal), reports the IoU distribution and threshold
    pass rates, and judges whether poor localization stems from Stage 1
    proposal quality or Stage 2 regression.

    Args:
        proposals_json: Path to proposals JSON.
        num_images: Maximum number of images to analyze.
    """
    logger.info("=" * 80)
    logger.info("Bounding Box Regression Diagnostics")
    logger.info("=" * 80)

    with open(proposals_json) as f:
        proposals = json.load(f)

    # Bug fix: an empty proposals file previously produced nan means and
    # min()/max() on empty sequences below.
    if not proposals:
        logger.error("❌ proposals file is empty - nothing to analyze")
        return

    logger.info(f"\nAnalyzing {min(len(proposals), num_images)} images...")

    iou_distribution = []
    proposals_per_image = []
    matched_proposals_per_image = []

    for item in proposals[:num_images]:
        rois = item['rois']
        gts = item['labels']

        proposals_per_image.append(len(rois))

        # Images without GT labels contribute no IoU statistics.
        if len(gts) == 0:
            continue

        matched = 0
        for roi in rois:
            # Best IoU of this proposal against any GT box; gt[0] is the
            # class id, gt[1:] the bbox coordinates.
            best_iou = 0
            for gt in gts:
                gt_bbox = gt[1:]
                iou = compute_iou(roi, gt_bbox)
                best_iou = max(best_iou, iou)

            iou_distribution.append(best_iou)
            if best_iou > 0.5:
                matched += 1

        matched_proposals_per_image.append(matched)

    # Bug fix: if no analyzed image had GT labels, min(iou_distribution)
    # raised ValueError and the pass-rate computation divided by zero.
    if not iou_distribution:
        logger.error("❌ No ground-truth boxes found in the analyzed images - cannot compute IoU statistics")
        return

    logger.info(f"\nProposals statistics:")
    logger.info(f"  Average proposals/image: {np.mean(proposals_per_image):.1f}")
    logger.info(f"  Average matched proposals/image: {np.mean(matched_proposals_per_image):.1f}")

    logger.info(f"\nProposals vs GT IoU distribution:")
    logger.info(f"  Min IoU: {min(iou_distribution):.3f}")
    logger.info(f"  Average IoU: {np.mean(iou_distribution):.3f}")
    logger.info(f"  Median IoU: {np.median(iou_distribution):.3f}")
    logger.info(f"  Max IoU: {max(iou_distribution):.3f}")

    logger.info(f"\nIoU threshold pass rate:")
    for thresh in [0.3, 0.5, 0.7]:
        count = sum(1 for iou in iou_distribution if iou > thresh)
        pct = count / len(iou_distribution) * 100
        logger.info(f"  IoU>{thresh}: {count}/{len(iou_distribution)} ({pct:.1f}%)")

    logger.info(f"\n{'=' * 80}")
    logger.info("Problem Analysis")
    logger.info(f"{'=' * 80}")

    avg_iou = np.mean(iou_distribution)
    pass_rate_05 = sum(1 for iou in iou_distribution if iou > 0.5) / len(iou_distribution)

    if avg_iou < 0.4:
        logger.error("❌ Stage 1 generated proposals are of poor quality!")
        logger.error(f"   Average IoU is only {avg_iou:.3f}")
        logger.error(f"   Only {pass_rate_05 * 100:.1f}% of proposals have IoU>0.5")
        logger.error(f"\nThis is the root problem:")
        logger.error(f"   - Stage 1 proposals don't match GT")
        logger.error(f"   - Stage 2 learns incorrect bbox mapping")
        logger.error(f"   - Even if classification is correct, bbox is wrong")
        logger.info(f"\nSolutions:")
        logger.info(f"   1. Retrain Stage 1 to improve proposal quality")
        logger.info(f"   2. Or lower proposals conf threshold to 0.0001")
        logger.info(f"   3. Or increase tile_overlap")
    elif pass_rate_05 < 0.7:
        logger.warning("⚠️ Stage 1 proposal quality is average")
        logger.warning(f"   Only {pass_rate_05 * 100:.1f}% of proposals have IoU>0.5")
        logger.info(f"\nCan be improved:")
        logger.info(f"   - Adjust proposal generation parameters")
        logger.info(f"   - Or accept current quality")
    else:
        logger.info("✅ Stage 1 proposal quality is good")
        logger.info(f"   {pass_rate_05 * 100:.1f}% of proposals have IoU>0.5")
        logger.info(f"\nThe problem is in Stage 2 bbox regression!")
        logger.info(f"   - Proposal quality is OK")
        logger.info(f"   - But Stage 2 didn't learn regression")
        logger.info(f"\nPossible reasons:")
        logger.info(f"   1. Regression loss weight too small")
        logger.info(f"   2. Not enough training epochs")
        logger.info(f"   3. Regression head too weak")