"""Quality check tools for proposals and data."""
import json
import logging
import random
from pathlib import Path
from typing import Optional
from collections import Counter, defaultdict

import numpy as np
import yaml
import matplotlib.pyplot as plt

from src.utils.bbox_utils import compute_iou

logger = logging.getLogger(__name__)


def check_proposals(proposals_json: str = 'proposals/proposals.json',
                    sample_size: int = 100) -> None:
    """
    Analyze proposal quality.

    Loads the proposals JSON, logs basic statistics and anomaly counts,
    estimates recall@IoU0.5 on a random sample of images, and prints a
    quality assessment with actionable suggestions.

    Args:
        proposals_json: Path to proposals JSON file. Each entry is expected
            to contain 'rois' (proposal boxes) and 'labels' (GT rows whose
            first value is the class id and the remainder the bbox).
        sample_size: Number of images to sample for the recall analysis.
    """
    logger.info("=" * 80)
    logger.info("Proposals Quality Analysis")
    logger.info("=" * 80)

    with open(proposals_json, 'r') as f:
        data = json.load(f)

    # Statistics
    total_images = len(data)
    if total_images == 0:
        # Guard: the percentage and sampling logic below divide by total_images.
        logger.error("No images found in %s", proposals_json)
        return

    total_proposals = sum(len(item['rois']) for item in data)
    total_gt_boxes = sum(len(item['labels']) for item in data)

    proposals_per_image = [len(item['rois']) for item in data]
    gt_per_image = [len(item['labels']) for item in data]
    # Hoisted: reused in the statistics and the assessment below.
    avg_proposals = np.mean(proposals_per_image)

    logger.info("\nBasic statistics:")
    logger.info(f"  Total images: {total_images}")
    logger.info(f"  Total proposals: {total_proposals}")
    logger.info(f"  Total GT boxes: {total_gt_boxes}")
    logger.info(f"  Average proposals/image: {avg_proposals:.1f}")
    logger.info(f"  Average GT boxes/image: {np.mean(gt_per_image):.1f}")

    # Check anomalies
    zero_proposal_imgs = sum(1 for p in proposals_per_image if p == 0)
    too_many_proposal_imgs = sum(1 for p in proposals_per_image if p > 100)

    logger.info("\nAnomalies:")
    logger.info(f"  Images with no proposals: {zero_proposal_imgs} ({zero_proposal_imgs / total_images * 100:.1f}%)")
    logger.info(f"  Images with too many proposals (>100): {too_many_proposal_imgs}")

    # Analyze proposal-GT matching rate on a random sample
    sample_size = min(sample_size, total_images)
    sampled = random.sample(data, sample_size)

    recall_at_50 = []
    for item in sampled:
        gts = item['labels']
        if not gts:
            continue

        matched = 0
        for gt_box in gts:
            gt_bbox = gt_box[1:]  # first element is the class id
            # Best IoU of any proposal against this GT box; 0 when no proposals.
            best_iou = max((compute_iou(p, gt_bbox) for p in item['rois']), default=0)
            if best_iou > 0.5:
                matched += 1

        recall_at_50.append(matched / len(gts))

    # Guard: np.mean([]) yields NaN (with a warning); treat a sample with no
    # GT boxes at all as zero recall instead of propagating NaN.
    avg_recall = float(np.mean(recall_at_50)) if recall_at_50 else 0.0
    logger.info(f"\nMatching quality (sampled {sample_size} images):")
    logger.info(f"  Average recall@IoU0.5: {avg_recall:.3f}")

    # Quality assessment
    logger.info("\nQuality assessment:")
    if avg_recall < 0.5:
        logger.error("  ❌ Recall too low! Proposals don't cover most GT objects")
        logger.info("     Suggestion: Lower confidence threshold to regenerate proposals")
        logger.info("     Command: python main.py gen-proposals --conf-thresh 0.01")
    elif avg_recall < 0.8:
        logger.warning("  ⚠️ Average recall, can be improved")
        logger.info("     Suggestion: Lower confidence threshold or increase overlap region")
    else:
        logger.info("  ✅ Good recall! Proposal quality is decent")

    if avg_proposals < 5:
        logger.error("  ❌ Too few proposals!")
        logger.info("     Suggestion: Lower confidence threshold")
    elif avg_proposals > 50:
        logger.warning("  ⚠️ Too many proposals, may affect training efficiency")
    else:
        logger.info("  ✅ Reasonable number of proposals")

    logger.info("=" * 80)


def _best_gt_ious(rois, gts):
    """For each GT row in *gts* (class id first, bbox after), return the best
    IoU achieved by any proposal in *rois* (0 when there are no proposals)."""
    best_ious = []
    for gt in gts:
        gt_bbox = gt[1:]
        best_ious.append(max((compute_iou(roi, gt_bbox) for roi in rois), default=0))
    return best_ious


def analyze_iou_distribution(data_yaml: str = 'dataset/data.yaml',
                             proposals_json: str = 'proposals/proposals.json',
                             output_file: str = 'iou_analysis.png') -> None:
    """
    Analyze IoU distribution across different classes and images.

    Computes, for every GT box, the best IoU against its image's proposals,
    logs per-image and per-class statistics, lists difficult images
    (average IoU < 0.3), and saves a 4-panel visualization.

    Args:
        data_yaml: Path to data.yaml (class names are read from 'names')
        proposals_json: Path to proposals JSON
        output_file: Path to save visualization
    """
    logger.info("=" * 80)
    logger.info("Detailed IoU Distribution Analysis")
    logger.info("=" * 80)

    # Load config
    with open(data_yaml) as f:
        config = yaml.safe_load(f)
        class_names = config['names']

    # Load proposals
    with open(proposals_json) as f:
        proposals_data = json.load(f)

    # Single matching pass. The previous version recomputed the O(rois x gts)
    # best-IoU matching three times over the whole dataset (statistics,
    # bad-image search, scatter plot); compute each image's result once and
    # reuse it everywhere below.
    class_ious = defaultdict(list)
    image_ious = []
    image_proposal_counts = []
    per_image = []  # (item, avg_iou) for every image that has GT boxes

    for item in proposals_data:
        rois = item['rois']
        gts = item['labels']

        image_proposal_counts.append(len(rois))

        if not gts:
            continue

        best_ious = _best_gt_ious(rois, gts)
        for gt, best_iou in zip(gts, best_ious):
            class_ious[int(gt[0])].append(best_iou)

        avg_iou = float(np.mean(best_ious))
        image_ious.append(avg_iou)
        per_image.append((item, avg_iou))

    logger.info("\nOverall statistics:")
    logger.info(f"  Total images: {len(proposals_data)}")
    logger.info(f"  Images with GT: {len(image_ious)}")
    logger.info(f"  Average proposals/image: {np.mean(image_proposal_counts):.1f}")

    if not image_ious:
        # Guard: min()/max()/np.percentile() below fail on empty input, and
        # the conclusion section would divide by zero.
        logger.error("No images with GT boxes; skipping IoU analysis")
        return

    logger.info("\nPer-image average IoU distribution:")
    logger.info(f"  Min: {min(image_ious):.3f}")
    logger.info(f"  25th percentile: {np.percentile(image_ious, 25):.3f}")
    logger.info(f"  Median: {np.median(image_ious):.3f}")
    logger.info(f"  75th percentile: {np.percentile(image_ious, 75):.3f}")
    logger.info(f"  Max: {max(image_ious):.3f}")
    logger.info(f"  Average: {np.mean(image_ious):.3f}")

    # Per-class statistics
    logger.info("\nPer-class IoU statistics:")
    logger.info(f"{'Class':<15} {'Count':>6} {'Avg IoU':>10} {'Median':>10} {'>0.5 ratio':>10}")
    logger.info("-" * 60)

    for cls_id in sorted(class_ious):
        ious = class_ious[cls_id]
        cls_name = class_names[cls_id]
        avg_iou = np.mean(ious)
        median_iou = np.median(ious)
        good_ratio = sum(1 for iou in ious if iou > 0.5) / len(ious)

        logger.info(f"{cls_name:<15} {len(ious):6d} {avg_iou:10.3f} {median_iou:10.3f} {good_ratio:9.1%}")

    # Difficult images: average best-IoU below 0.3 (reuses precomputed IoUs)
    bad_images = [
        {
            'path': item['img_path'],
            'avg_iou': avg_iou,
            'num_gts': len(item['labels']),
            'num_proposals': len(item['rois']),
        }
        for item, avg_iou in per_image
        if avg_iou < 0.3
    ]

    logger.info(f"\nDifficult images with IoU<0.3: {len(bad_images)}")
    if bad_images:
        logger.info("Examples (first 10):")
        for img in sorted(bad_images, key=lambda x: x['avg_iou'])[:10]:
            logger.info(f"  {Path(img['path']).name}")
            logger.info(f"    Avg IoU: {img['avg_iou']:.3f}, GT count: {img['num_gts']}, Proposals: {img['num_proposals']}")

    # Visualization
    fig, axes = plt.subplots(2, 2, figsize=(12, 10))

    # Plot 1: Per-image average IoU distribution
    axes[0, 0].hist(image_ious, bins=50, edgecolor='black')
    axes[0, 0].axvline(x=0.5, color='red', linestyle='--', label='0.5 threshold')
    axes[0, 0].set_xlabel('Average IoU')
    axes[0, 0].set_ylabel('Number of Images')
    axes[0, 0].set_title('Per-Image Average IoU Distribution')
    axes[0, 0].legend()
    axes[0, 0].grid(True, alpha=0.3)

    # Plot 2: Per-class IoU comparison
    sorted_cls_ids = sorted(class_ious)
    cls_names = [class_names[cls_id] for cls_id in sorted_cls_ids]
    cls_avg_ious = [np.mean(class_ious[cls_id]) for cls_id in sorted_cls_ids]
    axes[0, 1].bar(cls_names, cls_avg_ious, edgecolor='black')
    axes[0, 1].axhline(y=0.5, color='red', linestyle='--', label='0.5 threshold')
    axes[0, 1].set_ylabel('Average IoU')
    axes[0, 1].set_title('Per-Class Average IoU')
    axes[0, 1].tick_params(axis='x', rotation=45)
    axes[0, 1].legend()
    axes[0, 1].grid(True, alpha=0.3)

    # Plot 3: Proposals count vs IoU (reuses the precomputed per-image IoUs)
    scatter_x = [len(item['rois']) for item, _ in per_image]
    scatter_y = [avg_iou for _, avg_iou in per_image]
    axes[1, 0].scatter(scatter_x, scatter_y, alpha=0.3)
    axes[1, 0].axhline(y=0.5, color='red', linestyle='--', label='IoU=0.5')
    axes[1, 0].set_xlabel('Number of Proposals')
    axes[1, 0].set_ylabel('Average IoU')
    axes[1, 0].set_title('Proposals Count vs IoU')
    axes[1, 0].legend()
    axes[1, 0].grid(True, alpha=0.3)

    # Plot 4: Cumulative distribution
    sorted_ious = sorted(image_ious)
    cumulative = np.arange(1, len(sorted_ious) + 1) / len(sorted_ious)
    axes[1, 1].plot(sorted_ious, cumulative, linewidth=2)
    axes[1, 1].axvline(x=0.5, color='red', linestyle='--', label='IoU=0.5')
    axes[1, 1].set_xlabel('IoU Threshold')
    axes[1, 1].set_ylabel('Cumulative Ratio')
    axes[1, 1].set_title('IoU Cumulative Distribution')
    axes[1, 1].legend()
    axes[1, 1].grid(True, alpha=0.3)

    plt.tight_layout()
    plt.savefig(output_file, dpi=150)
    plt.close()
    logger.info(f"\nVisualization saved to: {output_file}")

    logger.info(f"\n{'=' * 80}")
    logger.info("Conclusions:")
    logger.info("=" * 80)

    good_images = sum(1 for iou in image_ious if iou > 0.5)
    good_pct = good_images / len(image_ious)

    if good_pct > 0.7:
        logger.info("✅ Most images (>70%) have good IoU")
        logger.info("   Can directly train Stage 2")
    elif good_pct > 0.4:
        logger.warning("⚠️ Some images have good IoU, some are poor")
        logger.warning(f"   {good_pct * 100:.1f}% of images have IoU>0.5")
        logger.info("   Suggestions:")
        logger.info("   1. Try training Stage 2 with current proposals first")
        logger.info("   2. Or filter out difficult samples with IoU<0.3")
    else:
        logger.error("❌ Most images have poor IoU")
        logger.error("   Need to:")
        logger.error("   1. Retrain Stage 1")
        logger.error("   2. Or abandon two-stage architecture")