"""Stage 1 diagnostic tools."""
import os
import logging
from pathlib import Path
from typing import Optional, List, Tuple
import yaml
import numpy as np
import torch
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from ultralytics import YOLO
from tqdm import tqdm

logger = logging.getLogger(__name__)


def _load_stage1_config(data_yaml: str) -> Optional[dict]:
    """Load and report the data.yaml config; return the parsed dict or None on failure."""
    logger.info("\n[1/7] Checking configuration file...")
    if not os.path.exists(data_yaml):
        logger.error(f"❌ Cannot find {data_yaml}")
        return None

    with open(data_yaml, 'r') as f:
        config = yaml.safe_load(f)

    logger.info(f"✅ data.yaml loaded successfully")
    logger.info(f"   Number of classes: {config['nc']}")
    logger.info(f"   Class names: {config['names']}")
    logger.info(f"   Train set: {config['train']}")
    logger.info(f"   Val set: {config.get('val', 'N/A')}")
    logger.info(f"   Test set: {config.get('test', 'N/A')}")
    return config


def _check_dataset_paths(data_yaml: str, config: dict) -> Optional[Tuple[List[Path], Path]]:
    """Verify train image/label directories exist; return (train_images, label_dir) or None."""
    logger.info("\n[2/7] Checking dataset paths...")
    base_dir = Path(data_yaml).parent

    train_img_dir = base_dir / config['train']
    # YOLO convention: labels live in a parallel 'labels' tree mirroring 'images'.
    train_label_dir = base_dir / config['train'].replace('images', 'labels')

    if not train_img_dir.exists():
        logger.error(f"❌ Training image directory does not exist: {train_img_dir}")
        return None
    train_images = list(train_img_dir.glob('*.jpg')) + list(train_img_dir.glob('*.png'))
    logger.info(f"✅ Training image directory: {train_img_dir}")
    logger.info(f"   Number of images: {len(train_images)}")

    if not train_label_dir.exists():
        logger.error(f"❌ Label directory does not exist: {train_label_dir}")
        return None
    train_labels = list(train_label_dir.glob('*.txt'))
    logger.info(f"✅ Label directory: {train_label_dir}")
    logger.info(f"   Number of labels: {len(train_labels)}")
    return train_images, train_label_dir


def _check_data_integrity(train_images: List[Path], train_label_dir: Path) -> None:
    """Sample up to 100 images and report missing/empty/malformed YOLO label files."""
    logger.info("\n[3/7] Checking data integrity...")
    missing_labels = 0
    empty_labels = 0
    invalid_labels = 0

    check_count = min(100, len(train_images))
    for img_path in train_images[:check_count]:
        label_path = train_label_dir / f"{img_path.stem}.txt"

        if not label_path.exists():
            missing_labels += 1
            continue

        with open(label_path, 'r') as f:
            lines = f.readlines()

        if len(lines) == 0:
            empty_labels += 1
            continue

        for line in lines:
            parts = line.strip().split()
            # YOLO detection format: class_id cx cy w h (exactly 5 fields).
            if len(parts) != 5:
                invalid_labels += 1
                logger.warning(f"   ⚠️ Invalid format: {label_path.name} - {line.strip()}")
                break

    logger.info(f"   Missing labels: {missing_labels}/{check_count}")
    logger.info(f"   Empty labels: {empty_labels}/{check_count}")
    logger.info(f"   Invalid format: {invalid_labels}/{check_count}")

    # Report every problem independently (an elif chain would mask all but the first).
    has_problem = False
    if missing_labels > check_count * 0.1:
        logger.error(f"   ❌ More than 10% of images are missing labels!")
        has_problem = True
    if empty_labels > check_count * 0.1:
        logger.error(f"   ❌ More than 10% of labels are empty!")
        has_problem = True
    if invalid_labels > 0:
        logger.error(f"   ❌ Found invalid label format!")
        has_problem = True
    if not has_problem:
        logger.info(f"   ✅ Data integrity is good")


def _check_weights_file(weights_path: str) -> None:
    """Report whether the weights file exists and loads as a checkpoint."""
    logger.info("\n[4/7] Checking model weights...")

    if not os.path.exists(weights_path):
        logger.warning(f"⚠️ Weights file does not exist: {weights_path}")
        logger.info("   Will use pretrained model yolov10n.pt")
        return

    try:
        # NOTE(review): torch>=2.6 defaults weights_only=True, which can reject
        # full ultralytics checkpoints; a load failure here may indicate a torch
        # version mismatch rather than true corruption — verify before retraining.
        checkpoint = torch.load(weights_path, map_location='cpu')
        logger.info(f"✅ Weights file exists: {weights_path}")
        if isinstance(checkpoint, dict):
            keys = list(checkpoint.keys())[:5]
            logger.info(f"   Keys included: {keys}...")
    except Exception as e:
        logger.error(f"❌ Weights file corrupted: {e}")


def _load_yolo_model(weights_path: str):
    """Load custom weights if present, else the pretrained model; return model or None."""
    logger.info("\n[5/7] Testing YOLO model loading...")
    try:
        if os.path.exists(weights_path):
            model = YOLO(weights_path)
            logger.info(f"✅ Successfully loaded custom weights")
        else:
            model = YOLO('yolov10n.pt')
            logger.info(f"✅ Successfully loaded pretrained weights")
        return model
    except Exception as e:
        logger.error(f"❌ YOLO loading failed: {e}")
        return None


def _test_single_inference(model, train_images: List[Path], output_dir: str) -> None:
    """Run one-image inference at a very low threshold and save a box visualization."""
    logger.info("\n[6/7] Testing single image inference...")
    if len(train_images) == 0:
        return

    test_img = train_images[0]
    logger.info(f"   Test image: {test_img.name}")

    try:
        # conf=0.01 is intentionally near-zero: even an under-trained model
        # should produce *something*; zero detections signals a deeper problem.
        results = model.predict(
            source=str(test_img),
            conf=0.01,
            iou=0.5,
            verbose=False
        )

        num_detections = len(results[0].boxes)
        logger.info(f"   Detected {num_detections} objects (conf>0.01)")

        if num_detections == 0:
            logger.error(f"   ❌ No detections! This is abnormal")
            logger.info(f"   Suggestions: 1) Check if model was trained")
            logger.info(f"                2) Check if images and labels match")
            logger.info(f"                3) Retrain Stage 1")
            return

        logger.info(f"   ✅ Detection working normally")

        # Visualization: draw up to the first 10 boxes on the test image.
        img = Image.open(test_img)
        fig, ax = plt.subplots(1, figsize=(12, 8))
        ax.imshow(img)

        for box in results[0].boxes.xyxy[:10]:
            x1, y1, x2, y2 = box.cpu().numpy()
            rect = patches.Rectangle(
                (x1, y1), x2 - x1, y2 - y1,
                linewidth=2, edgecolor='red', facecolor='none'
            )
            ax.add_patch(rect)

        ax.axis('off')
        plt.tight_layout()
        output_path = os.path.join(output_dir, 'stage1_test_detection.png')
        plt.savefig(output_path, dpi=150, bbox_inches='tight')
        plt.close()
        logger.info(f"   Visualization saved to: {output_path}")

    except Exception as e:
        logger.error(f"   ❌ Inference failed: {e}")


def _check_training_history() -> None:
    """Summarize runs/train/stage1_proposer/results.csv and judge the final mAP50."""
    logger.info("\n[7/7] Checking training history...")
    train_dir = Path('runs/train/stage1_proposer')

    if not train_dir.exists():
        logger.warning(f"⚠️ Training records not found, model may have never been trained")
        return

    logger.info(f"✅ Found training records: {train_dir}")

    results_csv = train_dir / 'results.csv'
    if not results_csv.exists():
        logger.warning(f"   ⚠️ results.csv not found")
        return

    import pandas as pd

    try:
        df = pd.read_csv(results_csv)
        logger.info(f"   Training epochs: {len(df)}")

        if len(df) > 0:
            last_rows = df.tail(5)
            logger.info("\n   Last 5 epochs metrics:")
            for idx, row in last_rows.iterrows():
                epoch = int(row.get('epoch', idx))
                # Combined loss omits dfl_loss; used only as a rough trend signal.
                train_loss = row.get('train/box_loss', 0) + row.get('train/cls_loss', 0)
                val_map = row.get('metrics/mAP50(B)', 0)
                logger.info(f"   Epoch {epoch}: Loss={train_loss:.3f}, mAP50={val_map:.3f}")

            final_map = df['metrics/mAP50(B)'].iloc[-1] if 'metrics/mAP50(B)' in df.columns else 0

            if final_map < 0.1:
                logger.error(f"\n   ❌ Final mAP50={final_map:.3f} too low! Training failed")
                logger.info(f"      Possible reasons:")
                logger.info(f"      1. Not enough training epochs (current {len(df)} epochs)")
                logger.info(f"      2. Inappropriate learning rate")
                logger.info(f"      3. Issues with data annotation")
            elif final_map < 0.3:
                logger.warning(f"\n   ⚠️ Final mAP50={final_map:.3f} barely usable, recommend retraining")
            else:
                logger.info(f"\n   ✅ Final mAP50={final_map:.3f} training successful")

    except Exception as e:
        logger.warning(f"   ⚠️ Cannot read results.csv: {e}")


def diagnose_stage1(data_yaml: str = 'dataset/data.yaml',
                    weights_path: str = 'weights/stage1_proposer.pt',
                    output_dir: str = '.') -> None:
    """
    Comprehensive Stage 1 diagnostics.

    Runs seven checks in order (config, paths, label integrity, weights file,
    model loading, single-image inference, training history), logging a verdict
    for each; stops early only when a later check cannot proceed.

    Args:
        data_yaml: Path to data.yaml configuration
        weights_path: Path to Stage 1 weights
        output_dir: Directory to save diagnostic outputs
    """
    logger.info("=" * 80)
    logger.info("Stage 1 (YOLO) Diagnostic Tool")
    logger.info("=" * 80)

    config = _load_stage1_config(data_yaml)
    if config is None:
        return

    dataset = _check_dataset_paths(data_yaml, config)
    if dataset is None:
        return
    train_images, train_label_dir = dataset

    _check_data_integrity(train_images, train_label_dir)
    _check_weights_file(weights_path)

    model = _load_yolo_model(weights_path)
    if model is None:
        return

    _test_single_inference(model, train_images, output_dir)
    _check_training_history()

    logger.info("\n" + "=" * 80)
    logger.info("Diagnosis complete!")
    logger.info("=" * 80)


def diagnose_stage1_on_test(data_yaml: str = 'dataset/data.yaml',
                            weights_path: str = 'weights/stage1_proposer.pt',
                            device: str = 'cuda',
                            train_baseline: float = 8.8) -> None:
    """
    Diagnose Stage 1 performance on test set.

    Sweeps several confidence thresholds over the test images, reports the
    average proposal count per image, then compares the conf=0.001 average
    against the known training-set baseline to localize the failure to
    Stage 1 (proposer) or Stage 2.

    Args:
        data_yaml: Path to data.yaml configuration
        weights_path: Path to Stage 1 weights
        device: Device to use ('cuda' or 'cpu')
        train_baseline: Average proposals/image measured on the training set
            at conf=0.001, used as the comparison reference (default 8.8)
    """
    from src.models.proposer import YOLOProposer

    logger.info("=" * 80)
    logger.info("Stage 1 Performance on Test Set")
    logger.info("=" * 80)

    # Load proposer
    try:
        proposer = YOLOProposer(weights_path, device)
    except Exception as e:
        logger.error(f"Failed to load proposer: {e}")
        return

    # Get test set
    with open(data_yaml) as f:
        config = yaml.safe_load(f)

    # 'test' is optional in data.yaml (diagnose_stage1 treats it as such);
    # guard instead of crashing with KeyError.
    if 'test' not in config:
        logger.error(f"❌ No 'test' entry in {data_yaml}, cannot evaluate test set")
        return

    test_dir = Path(data_yaml).parent / config['test']
    test_images = list(test_dir.glob('*.jpg')) + list(test_dir.glob('*.png'))

    logger.info(f"\nTest set images: {len(test_images)}")

    # An empty test set would make every statistic below divide by zero or
    # raise on min()/max(); bail out with a clear message instead.
    if not test_images:
        logger.error(f"❌ No test images found in {test_dir}")
        return

    def _count_proposals(conf: float, desc: str) -> List[int]:
        """Return the proposal count per test image at the given confidence."""
        counts = []
        for img_path in tqdm(test_images, desc=desc, leave=False, ncols=80):
            rois = proposer.propose(
                str(img_path),
                conf_thresh=conf,
                iou_thresh=0.5
            )
            counts.append(len(rois))
        return counts

    # Test different thresholds
    thresholds = [0.001, 0.01, 0.1, 0.25, 0.5]

    logger.info(f"\n{'Threshold':<12} {'Avg proposals/img':<20} {'No proposals':<20}")
    logger.info("-" * 52)

    for thresh in thresholds:
        proposals_counts = _count_proposals(thresh, f"Threshold {thresh}")
        zero_count = sum(1 for c in proposals_counts if c == 0)

        avg = np.mean(proposals_counts)
        zero_pct = zero_count / len(test_images) * 100

        logger.info(f"{thresh:<12.3f} {avg:<20.1f} {zero_count} ({zero_pct:.1f}%)")

    # Detailed analysis at lowest threshold
    logger.info(f"\n{'=' * 80}")
    logger.info("Detailed Analysis (threshold=0.001)")
    logger.info(f"{'=' * 80}")

    proposals_counts = _count_proposals(0.001, "Generating proposals")
    avg_test = np.mean(proposals_counts)  # computed once, reused below

    logger.info(f"\nStatistics:")
    logger.info(f"  Min: {min(proposals_counts)}")
    logger.info(f"  Max: {max(proposals_counts)}")
    logger.info(f"  Average: {avg_test:.1f}")
    logger.info(f"  Median: {np.median(proposals_counts):.1f}")
    logger.info(f"  0 proposals: {sum(1 for x in proposals_counts if x == 0)} images")
    logger.info(f"  <5 proposals: {sum(1 for x in proposals_counts if x < 5)} images")
    logger.info(f"  >=10 proposals: {sum(1 for x in proposals_counts if x >= 10)} images")

    logger.info(f"\n{'=' * 80}")
    logger.info("Comparison with Training Set")
    logger.info(f"{'=' * 80}")
    logger.info(f"Training set (conf=0.001): Average {train_baseline}/image")
    logger.info(f"Test set (conf=0.001): Average {avg_test:.1f}/image")

    if avg_test < 5:
        logger.error(f"\n❌ Stage 1 performs poorly on test set!")
        logger.error(f"   This is the root cause of low mAP:")
        logger.error(f"   - Stage 1 on training set: {train_baseline}/image ✅")
        logger.error(f"   - Stage 1 on test set: {avg_test:.1f}/image ❌")
        logger.info(f"\nSolutions:")
        logger.info(f"   1. Check if train/test distribution is consistent")
        logger.info(f"   2. Stage 1 may need retraining")
        logger.info(f"   3. Consider adjusting threshold on test set")
    elif avg_test < train_baseline * 0.7:
        logger.warning(f"\n⚠️ Stage 1 performance drops significantly on test set")
        logger.warning(f"   Dropped by {(1 - avg_test / train_baseline) * 100:.1f}%")
    else:
        logger.info(f"\n✅ Stage 1 performs normally on test set")
        logger.info(f"   The issue is in Stage 2!")