"""Visualization tools for model predictions and proposals."""
import logging
import random
from pathlib import Path
from typing import Optional, List

import torch
import yaml
import numpy as np
from PIL import Image, ImageDraw, ImageFont
from torchvision import transforms

from src.utils.bbox_utils import compute_iou

logger = logging.getLogger(__name__)


def visualize_stage1_quality(data_yaml: str = 'dataset/data.yaml',
                             weights_path: str = 'weights/stage1_proposer.pt',
                             device: str = 'cuda',
                             num_images: int = 5,
                             output_prefix: str = 'stage1_quality',
                             conf_thresh: float = 0.01) -> None:
    """
    Visualize Stage 1 prediction quality.

    Samples up to `num_images` annotated training images, runs the Stage 1
    proposer on each, and saves one PNG per image showing the ground-truth
    boxes (green), all proposals (red) and the best-IoU proposal per GT box
    (yellow). Per-GT best IoU values are also logged.

    Args:
        data_yaml: Path to data.yaml
        weights_path: Path to Stage 1 weights
        device: Device to use
        num_images: Number of images to visualize
        output_prefix: Prefix for output files
        conf_thresh: Confidence threshold for proposals
    """
    from src.models.proposer import YOLOProposer

    logger.info("=" * 80)
    logger.info("Visualizing Stage 1 Prediction Quality")
    logger.info("=" * 80)

    proposer = YOLOProposer(weights_path, device)

    with open(data_yaml) as f:
        config = yaml.safe_load(f)
        class_names = config['names']

    # Training set; YOLO convention keeps labels in a parallel 'labels' tree.
    train_dir = Path(data_yaml).parent / config['train']
    train_label_dir = Path(data_yaml).parent / config['train'].replace('images', 'labels')
    train_images = list(train_dir.glob('*.jpg')) + list(train_dir.glob('*.png'))

    # Randomly pick up to num_images images whose label file has real content
    # (a whitespace-only file would parse to zero GT boxes, so skip it).
    sampled = []
    for img_path in random.sample(train_images, min(50, len(train_images))):
        label_path = train_label_dir / f"{img_path.stem}.txt"
        if label_path.exists():
            with open(label_path) as f:
                if f.read().strip():
                    sampled.append(img_path)
                    if len(sampled) >= num_images:
                        break

    for idx, img_path in enumerate(sampled, 1):
        logger.info(f"\nImage {idx}: {img_path.name}")

        # Load image
        img = Image.open(img_path).convert('RGB')
        w, h = img.size

        # Load GT: each YOLO label line is "class cx cy w h", normalized [0,1].
        label_path = train_label_dir / f"{img_path.stem}.txt"
        gts = []
        with open(label_path) as f:
            for line in f:
                parts = line.strip().split()
                if len(parts) == 5:
                    cls, cx, cy, bw, bh = map(float, parts)
                    # Convert normalized center/size to absolute corner coords.
                    x1 = (cx - bw / 2) * w
                    y1 = (cy - bh / 2) * h
                    x2 = (cx + bw / 2) * w
                    y2 = (cy + bh / 2) * h
                    gts.append({
                        'class': int(cls),
                        'bbox': [x1, y1, x2, y2]
                    })

        logger.info(f"  GT: {len(gts)} objects")

        # Stage 1 predictions
        proposals = proposer.propose(
            str(img_path),
            conf_thresh=conf_thresh,
            iou_thresh=0.5
        )

        logger.info(f"  Proposals: {len(proposals)} objects")

        # Find the best-matching proposal per GT box ONCE and reuse it for both
        # logging and drawing (previously this O(GT x proposals) IoU scan was
        # duplicated further down for the yellow boxes).
        best_matches = []
        for gt_idx, gt in enumerate(gts):
            best_iou = 0.0
            best_prop = None
            for prop in proposals:
                iou = compute_iou(prop, gt['bbox'])
                if iou > best_iou:
                    best_iou = iou
                    best_prop = prop
            best_matches.append((best_iou, best_prop))
            logger.info(f"    GT[{gt_idx}] {class_names[gt['class']]}: Best IoU={best_iou:.3f}")

        # Visualization
        vis_img = img.copy()
        draw = ImageDraw.Draw(vis_img)

        # Draw GT (green thick lines)
        for gt in gts:
            draw.rectangle(gt['bbox'], outline='lime', width=4)
            draw.text((gt['bbox'][0], gt['bbox'][1] - 20),
                      f"GT: {class_names[gt['class']]}",
                      fill='lime')

        # Draw all proposals (red thin lines); cap at 50 to keep the image readable.
        for prop in proposals[:50]:
            draw.rectangle(prop.tolist(), outline='red', width=1)

        # Draw the precomputed best-matched proposal per GT (yellow thick lines).
        for best_iou, best_prop in best_matches:
            if best_prop is not None:
                draw.rectangle(best_prop.tolist(), outline='yellow', width=3)
                draw.text((best_prop[0], best_prop[1] + 10),
                          f"Best: IoU={best_iou:.2f}",
                          fill='yellow')

        # Save
        output = f'{output_prefix}_{idx}.png'
        vis_img.save(output)
        logger.info(f"  Saved: {output}")

    logger.info(f"\n{'=' * 80}")
    logger.info("Legend:")
    logger.info("  Green thick box = Ground Truth")
    logger.info("  Red thin box = All Proposals")
    logger.info("  Yellow thick box = Best matched Proposal with GT")
    logger.info("=" * 80)


def visualize_predictions(data_yaml: str = 'dataset/data.yaml',
                          stage1_weights: str = 'weights/stage1_proposer.pt',
                          stage2_weights: Optional[str] = None,
                          device: str = 'cuda',
                          num_images: int = 5,
                          output_prefix: str = 'vis_pred',
                          conf_thresh: float = 0.001) -> None:
    """
    Visualize end-to-end predictions.

    Runs the two-stage pipeline (Stage 1 proposer -> Stage 2 ROI refiner) on
    a random sample of annotated test images and saves one PNG per image with
    ground truth (green) and the top-3 refined predictions (red).

    Args:
        data_yaml: Path to data.yaml
        stage1_weights: Path to Stage 1 weights
        stage2_weights: Path to Stage 2 weights (falls back to STAGE2_CONFIG)
        device: Device to use (falls back to config DEVICE when falsy)
        num_images: Number of images to visualize
        output_prefix: Prefix for output files
        conf_thresh: Stage 1 confidence threshold for proposal generation
    """
    from src.config import STAGE2_CONFIG, DEVICE
    from src.models.proposer import YOLOProposer
    from src.models.refiner import ROIRefinerModel

    stage2_weights = stage2_weights or STAGE2_CONFIG['weights_path']
    device = device or DEVICE

    logger.info("=" * 80)
    logger.info("Visualizing Predictions")
    logger.info("=" * 80)

    # Load models
    with open(data_yaml) as f:
        data = yaml.safe_load(f)
        class_names = data['names']
        num_classes = data['nc']

    proposer = YOLOProposer(stage1_weights, device)
    refiner = ROIRefinerModel(device=device)

    # NOTE(security): weights_only=False runs the full pickle machinery, which
    # can execute arbitrary code. Only load checkpoints from trusted sources;
    # prefer weights_only=True if the checkpoint contains only tensors.
    checkpoint = torch.load(stage2_weights, map_location=device, weights_only=False)
    # Accept both {'model_state_dict': ...} checkpoints and raw state dicts.
    refiner.load_state_dict(checkpoint.get('model_state_dict', checkpoint))
    refiner.eval()

    # Test set; labels live in a parallel 'labels' directory.
    test_dir = Path(data_yaml).parent / data['test']
    test_label_dir = Path(data_yaml).parent / data['test'].replace('images', 'labels')
    test_images = list(test_dir.glob('*.jpg')) + list(test_dir.glob('*.png'))

    # Standard ImageNet preprocessing for the ROI crops fed to the refiner.
    transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])

    # Randomly select up to num_images images with non-empty annotations.
    sampled_images = []
    for img_path in random.sample(test_images, min(50, len(test_images))):
        label_path = test_label_dir / f"{img_path.stem}.txt"
        if label_path.exists():
            with open(label_path) as f:
                if len(f.readlines()) > 0:
                    sampled_images.append(img_path)
                    if len(sampled_images) >= num_images:
                        break

    logger.info(f"\nProcessing {len(sampled_images)} images...")

    for img_idx, img_path in enumerate(sampled_images, 1):
        logger.info(f"\nImage {img_idx}: {img_path.name}")

        # Load image
        full_image = Image.open(img_path).convert("RGB")
        w, h = full_image.size

        # Load GT: each YOLO label line is "class cx cy w h", normalized [0,1].
        label_path = test_label_dir / f"{img_path.stem}.txt"
        ground_truths = []

        with open(label_path) as f:
            for line in f:
                parts = line.strip().split()
                if len(parts) == 5:
                    cls, cx, cy, bw, bh = map(float, parts)
                    # Convert normalized center/size to absolute corner coords.
                    x1 = (cx - bw / 2) * w
                    y1 = (cy - bh / 2) * h
                    x2 = (cx + bw / 2) * w
                    y2 = (cy + bh / 2) * h
                    ground_truths.append({
                        'class': int(cls),
                        'bbox': [x1, y1, x2, y2]
                    })

        logger.info(f"  Ground truth: {len(ground_truths)} objects")
        for gt in ground_truths:
            logger.info(f"    - {class_names[gt['class']]}")

        # Stage 1: Generate proposals (threshold now configurable, matching
        # visualize_stage1_quality's interface).
        rois = proposer.propose(str(img_path), conf_thresh=conf_thresh, iou_thresh=0.5)
        logger.info(f"  Proposals: {len(rois)} objects")

        if len(rois) == 0:
            logger.warning("  ⚠️ No proposals")
            continue

        # Stage 2: crop each proposal, clip to image bounds, batch for refiner.
        roi_batch = []
        valid_rois = []

        for box in rois:
            x1, y1, x2, y2 = map(int, box)
            x1, y1 = max(0, x1), max(0, y1)
            x2, y2 = min(w, x2), min(h, y2)

            # Skip degenerate boxes that collapse after clipping.
            if x2 > x1 and y2 > y1:
                roi_img = full_image.crop((x1, y1, x2, y2))
                roi_batch.append(transform(roi_img))
                valid_rois.append([x1, y1, x2, y2])

        if len(roi_batch) == 0:
            continue

        roi_tensors = torch.stack(roi_batch).to(device)

        with torch.no_grad():
            class_logits, bbox_deltas = refiner(roi_tensors)

        scores = torch.softmax(class_logits, dim=1)
        class_probs, class_preds = torch.max(scores, dim=1)

        # Decode predictions
        predictions = []
        for i, roi in enumerate(valid_rois):
            prob = class_probs[i].item()
            cls_id = class_preds[i].item()

            # Model outputs: 0 = background, 1..num_classes = foreground
            if cls_id == 0:  # Background
                continue

            # Apply bbox regression: deltas are per-class, 4 values each.
            # Map cls_id (1..num_classes) to bbox index (0..num_classes-1).
            bbox_idx = cls_id - 1
            delta = bbox_deltas[i, bbox_idx * 4:(bbox_idx + 1) * 4].cpu().numpy()
            roi_w = roi[2] - roi[0]
            roi_h = roi[3] - roi[1]
            roi_cx = roi[0] + 0.5 * roi_w
            roi_cy = roi[1] + 0.5 * roi_h

            # Standard R-CNN style decoding: (dx, dy) shift the center,
            # (dw, dh) scale width/height in log space.
            pred_cx = roi_cx + delta[0] * roi_w
            pred_cy = roi_cy + delta[1] * roi_h
            pred_w = roi_w * np.exp(delta[2])
            pred_h = roi_h * np.exp(delta[3])

            pred_x1 = pred_cx - 0.5 * pred_w
            pred_y1 = pred_cy - 0.5 * pred_h
            pred_x2 = pred_cx + 0.5 * pred_w
            pred_y2 = pred_cy + 0.5 * pred_h

            # Map cls_id (1..num_classes) to class_names index (0..num_classes-1).
            class_name_idx = cls_id - 1

            predictions.append({
                'class': cls_id,
                'class_name': class_names[class_name_idx],
                'confidence': prob,
                'bbox': [pred_x1, pred_y1, pred_x2, pred_y2],
                'roi': roi
            })

        logger.info(f"  Predictions: {len(predictions)} objects")

        # Log and draw only the top-3 most confident predictions.
        predictions_sorted = sorted(predictions, key=lambda x: x['confidence'], reverse=True)
        for pred in predictions_sorted[:3]:
            logger.info(f"    - {pred['class_name']} ({pred['confidence']:.3f})")

        # Visualization
        vis_image = full_image.copy()
        draw = ImageDraw.Draw(vis_image)

        # Draw GT (green)
        for gt in ground_truths:
            bbox = gt['bbox']
            draw.rectangle(bbox, outline='lime', width=3)
            draw.text((bbox[0], bbox[1] - 15),
                      f"GT: {class_names[gt['class']]}",
                      fill='lime')

        # Draw predictions (red) - only top 3
        for pred in predictions_sorted[:3]:
            bbox = pred['bbox']
            draw.rectangle(bbox, outline='red', width=2)
            draw.text((bbox[0], bbox[1] + 5),
                      f"Pred: {pred['class_name']} {pred['confidence']:.2f}",
                      fill='red')

        # Save
        output_path = f'{output_prefix}_{img_idx}_{img_path.stem}.png'
        vis_image.save(output_path)
        logger.info(f"  Visualization saved: {output_path}")

    logger.info(f"\n{'=' * 80}")
    logger.info("Visualization complete")
    logger.info(f"{'=' * 80}")
    logger.info("\nLegend:")
    logger.info("  Green box = Ground Truth")
    logger.info("  Red box = Model Prediction")
