"""
 Stage 1 Proposal 

proposals



- Proposal Recall: proposals
- Mean Max IoU: proposalIoU
- Proposals: 

 = Recall * w1 + MeanIoU * w2 - log(proposals_count) * w3
"""

import os
import sys
import json
import yaml
import numpy as np
import torch
from pathlib import Path
from typing import Dict, List, Tuple, Optional
from tqdm import tqdm
import matplotlib.pyplot as plt
import seaborn as sns
from dataclasses import dataclass, asdict

# Make the project root importable when this script is run directly.
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))

from src.config import DATA_YAML, STAGE1_CONFIG, WEIGHTS_DIR
from src.utils.bbox_utils import compute_iou


@dataclass
class ProposalQualityMetrics:
    """Quality metrics for the proposals produced at one confidence threshold."""
    threshold: float  # confidence threshold these metrics were measured at
    recall_at_05: float  # fraction of GT boxes matched by a proposal with IoU >= 0.5
    recall_at_075: float  # fraction of GT boxes matched by a proposal with IoU >= 0.75
    mean_max_iou: float  # mean over GT boxes of the best IoU against any proposal
    avg_proposals_per_image: float  # average number of proposals per image
    total_images: int  # number of validation images evaluated
    total_gt_boxes: int  # total ground-truth boxes across those images
    total_proposals: int  # total proposals across those images
    composite_score: float  # weighted combination of recall, IoU and proposal count

    def to_dict(self) -> Dict:
        """Return the metrics as a plain dict keyed by field name."""
        return asdict(self)


class ProposalThresholdOptimizer:
    """Sweeps Stage 1 confidence thresholds and scores the resulting proposals.

    For each candidate threshold the Stage 1 detector is run over the
    validation split; the proposals are scored on recall, localization
    quality (mean max IoU) and proposal count. The threshold with the best
    composite score is reported, together with plots and a markdown
    recommendation file.
    """

    def __init__(
        self,
        data_yaml: str,
        stage1_weights: str,
        device: str = 'cuda',
        recall_weight: float = 1.0,
        iou_weight: float = 0.8,
        count_penalty: float = 0.1,
        target_proposals_range: Tuple[int, int] = (20, 100)
    ):
        """
        Args:
            data_yaml: Path to the dataset YAML (YOLO-style layout).
            stage1_weights: Path to the Stage 1 model weights.
            device: Inference device ('cuda' or 'cpu').
            recall_weight: Weight of recall@0.5 in the composite score.
            iou_weight: Weight of mean max IoU in the composite score.
            count_penalty: Penalty factor applied when the average proposal
                count exceeds the target range.
            target_proposals_range: (min, max) desired proposals per image.
        """
        self.data_yaml = data_yaml
        self.stage1_weights = stage1_weights
        self.device = device
        self.recall_weight = recall_weight
        self.iou_weight = iou_weight
        self.count_penalty = count_penalty
        self.target_proposals_range = target_proposals_range

        # Dataset config is loaded for reference; validation paths are
        # derived from the YAML file's location in _get_val_images_and_labels().
        with open(data_yaml, 'r', encoding='utf-8') as f:
            self.data_config = yaml.safe_load(f)

        # Load the detector once; every threshold sweep reuses it.
        self._load_model()

    def _load_model(self):
        """Load the Stage 1 YOLO model onto the configured device."""
        from ultralytics import YOLO

        print(f"Loading Stage 1 model: {self.stage1_weights}")
        self.model = YOLO(self.stage1_weights)
        self.model.to(self.device)
        print(" Model loaded successfully")

    def _get_val_images_and_labels(self) -> List[Tuple[str, List[Dict]]]:
        """Collect validation images and their YOLO-format ground-truth boxes.

        Returns:
            List of (image_path, gt_boxes) tuples; images without any
            annotation are skipped.

        Raises:
            ValueError: If the expected val/images directory does not exist.
        """
        dataset_root = Path(self.data_yaml).parent

        # Assumes the standard YOLO layout relative to the data YAML.
        val_images_dir = dataset_root / 'val' / 'images'
        val_labels_dir = dataset_root / 'val' / 'labels'

        if not val_images_dir.exists():
            raise ValueError(f"Validation images directory not found: {val_images_dir}")

        data = []
        image_files = list(val_images_dir.glob('*.jpg')) + list(val_images_dir.glob('*.png'))

        for img_file in image_files:
            label_file = val_labels_dir / (img_file.stem + '.txt')

            # Parse YOLO label lines: "class x_center y_center width height",
            # all coordinates normalized to [0, 1].
            gt_boxes = []
            if label_file.exists():
                with open(label_file, 'r') as f:
                    for line in f:
                        parts = line.strip().split()
                        if len(parts) >= 5:
                            cls, x_center, y_center, width, height = map(float, parts[:5])
                            gt_boxes.append({
                                'class': int(cls),
                                'x_center': x_center,
                                'y_center': y_center,
                                'width': width,
                                'height': height
                            })

            if gt_boxes:  # only keep images that actually have annotations
                data.append((str(img_file), gt_boxes))

        print(f" Loaded {len(data)} validation images with annotations")
        return data

    def _yolo_to_xyxy(self, boxes: List[Dict], img_width: int, img_height: int) -> np.ndarray:
        """Convert normalized YOLO boxes to absolute xyxy pixel coordinates.

        Args:
            boxes: Dicts with normalized 'x_center'/'y_center'/'width'/'height'.
            img_width: Image width in pixels.
            img_height: Image height in pixels.

        Returns:
            Array of shape [N, 4] with [x1, y1, x2, y2] rows (empty -> [0, 4]).
        """
        if not boxes:
            return np.array([]).reshape(0, 4)

        xyxy_boxes = []
        for box in boxes:
            x_center = box['x_center'] * img_width
            y_center = box['y_center'] * img_height
            width = box['width'] * img_width
            height = box['height'] * img_height

            x1 = x_center - width / 2
            y1 = y_center - height / 2
            x2 = x_center + width / 2
            y2 = y_center + height / 2

            xyxy_boxes.append([x1, y1, x2, y2])

        return np.array(xyxy_boxes)

    def _evaluate_proposals_for_threshold(
        self,
        threshold: float,
        val_data: List[Tuple[str, List[Dict]]],
        sample_size: Optional[int] = None
    ) -> ProposalQualityMetrics:
        """Run inference at one threshold and measure proposal quality.

        Args:
            threshold: Confidence threshold for Stage 1 inference.
            val_data: (image_path, gt_boxes) pairs.
            sample_size: If set and smaller than the dataset, evaluate on a
                random subset of this size.

        Returns:
            ProposalQualityMetrics with plain-float fields (JSON-serializable).
        """
        from PIL import Image  # hoisted out of the per-image loop

        if sample_size and sample_size < len(val_data):
            # Random subset requested by the caller.
            import random
            val_data = random.sample(val_data, sample_size)

        total_images = len(val_data)
        total_gt_boxes = 0
        total_proposals = 0

        recalls_05 = []
        recalls_075 = []
        max_ious = []
        proposals_counts = []

        print(f"\n Testing threshold: {threshold:.3f}")

        for img_path, gt_boxes in tqdm(val_data, desc=f"Threshold {threshold:.3f}"):
            # Only the image size is needed here; use a context manager so the
            # file handle is closed immediately (Image.open is lazy otherwise).
            with Image.open(img_path) as img:
                img_width, img_height = img.size

            # Stage 1 inference at the candidate confidence threshold.
            results = self.model.predict(
                img_path,
                conf=threshold,
                iou=0.45,
                verbose=False,
                device=self.device
            )[0]

            # Extract proposal boxes as an [N, 4] xyxy array.
            if results.boxes is not None and len(results.boxes) > 0:
                proposals_xyxy = results.boxes.xyxy.cpu().numpy()
            else:
                proposals_xyxy = np.array([]).reshape(0, 4)

            # GT boxes in the same absolute xyxy coordinates.
            gt_xyxy = self._yolo_to_xyxy(gt_boxes, img_width, img_height)

            total_gt_boxes += len(gt_xyxy)
            total_proposals += len(proposals_xyxy)
            proposals_counts.append(len(proposals_xyxy))

            if len(gt_xyxy) == 0:
                continue

            if len(proposals_xyxy) == 0:
                # No proposals at all: every GT box on this image is a miss.
                recalls_05.append(0.0)
                recalls_075.append(0.0)
                max_ious.extend([0.0] * len(gt_xyxy))
                continue

            # IoU matrix of shape [num_gt, num_proposals].
            # NOTE(review): compute_iou appears to return a torch.Tensor
            # (hence .numpy()) — confirm against src.utils.bbox_utils.
            iou_matrix = compute_iou(
                gt_xyxy,
                proposals_xyxy
            ).numpy()

            # Best-matching proposal for every GT box.
            max_iou_per_gt = iou_matrix.max(axis=1)
            max_ious.extend(max_iou_per_gt.tolist())

            # Per-image recall at the two IoU thresholds.
            recalls_05.append(float((max_iou_per_gt >= 0.5).mean()))
            recalls_075.append(float((max_iou_per_gt >= 0.75).mean()))

        # Aggregate. Cast to plain float so json.dump() on the results dict
        # works (np.float64 is not JSON-serializable).
        avg_recall_05 = float(np.mean(recalls_05)) if recalls_05 else 0.0
        avg_recall_075 = float(np.mean(recalls_075)) if recalls_075 else 0.0
        mean_max_iou = float(np.mean(max_ious)) if max_ious else 0.0
        avg_proposals = float(np.mean(proposals_counts)) if proposals_counts else 0.0

        # Composite score trades recall and IoU against proposal count.
        composite_score = self._compute_composite_score(
            avg_recall_05,
            mean_max_iou,
            avg_proposals
        )

        metrics = ProposalQualityMetrics(
            threshold=float(threshold),
            recall_at_05=avg_recall_05,
            recall_at_075=avg_recall_075,
            mean_max_iou=mean_max_iou,
            avg_proposals_per_image=avg_proposals,
            total_images=total_images,
            total_gt_boxes=total_gt_boxes,
            total_proposals=total_proposals,
            composite_score=composite_score
        )

        return metrics

    def _compute_composite_score(
        self,
        recall: float,
        mean_iou: float,
        avg_proposals: float
    ) -> float:
        """Combine recall, mean IoU and proposal count into one score.

        The count term is 1.0 inside the target range, scales down linearly
        below it, and is penalized logarithmically above it. The final score
        is a weighted average normalized to [0, 1] when all terms are in [0, 1].
        """
        min_target, max_target = self.target_proposals_range

        if avg_proposals < min_target:
            # Too few proposals: risk of missed detections.
            count_score = avg_proposals / min_target
        elif avg_proposals > max_target:
            # Too many proposals: logarithmic penalty on the excess.
            count_score = 1.0 - self.count_penalty * np.log(1 + (avg_proposals - max_target) / max_target)
        else:
            # Within the desired range.
            count_score = 1.0

        # Weighted average of the three terms.
        score = (
            self.recall_weight * recall +
            self.iou_weight * mean_iou +
            count_score
        ) / (self.recall_weight + self.iou_weight + 1.0)

        return float(score)

    def optimize(
        self,
        threshold_range: Tuple[float, float] = (0.001, 0.1),
        num_points: int = 15,
        sample_size: Optional[int] = None,
        output_dir: str = 'proposal_threshold_optimization'
    ) -> Dict:
        """Sweep thresholds, pick the best one, and write all artifacts.

        Args:
            threshold_range: (min, max) confidence thresholds to test.
            num_points: Number of evenly spaced thresholds in the range.
            sample_size: Optional validation subset size (None = all images).
            output_dir: Directory for JSON results, plots and recommendations.

        Returns:
            Dict with the best threshold, its metrics, all per-threshold
            metrics, and the optimization configuration.
        """

        print("=" * 80)
        print(" Proposal Threshold Optimization")
        print("=" * 80)
        print(f"Threshold range: {threshold_range}")
        print(f"Number of test points: {num_points}")
        print(f"Sample size: {sample_size or 'All'}")
        print(f"Target proposals/image: {self.target_proposals_range}")
        print()

        val_data = self._get_val_images_and_labels()

        # Sample ONCE so every threshold is evaluated on the SAME subset;
        # resampling per threshold would make the scores incomparable.
        if sample_size and sample_size < len(val_data):
            import random
            val_data = random.sample(val_data, sample_size)
            print(f" Using random sample of {sample_size} images")

        # Evenly spaced candidate thresholds.
        thresholds = np.linspace(
            threshold_range[0],
            threshold_range[1],
            num_points
        )

        # Evaluate every candidate threshold on the fixed validation subset.
        all_metrics = []

        for threshold in thresholds:
            metrics = self._evaluate_proposals_for_threshold(
                threshold,
                val_data
            )
            all_metrics.append(metrics)

            print(f"\n Results for threshold {threshold:.4f}:")
            print(f"  Recall@0.5:  {metrics.recall_at_05:.4f}")
            print(f"  Recall@0.75: {metrics.recall_at_075:.4f}")
            print(f"  Mean Max IoU: {metrics.mean_max_iou:.4f}")
            print(f"  Avg Proposals/Image: {metrics.avg_proposals_per_image:.2f}")
            print(f"  Composite Score: {metrics.composite_score:.4f}")

        # Select the threshold with the highest composite score.
        best_idx = int(np.argmax([m.composite_score for m in all_metrics]))
        best_metrics = all_metrics[best_idx]

        print("\n" + "=" * 80)
        print(" Best Threshold Found")
        print("=" * 80)
        print(f"Threshold: {best_metrics.threshold:.4f}")
        print(f"Recall@0.5: {best_metrics.recall_at_05:.4f}")
        print(f"Mean Max IoU: {best_metrics.mean_max_iou:.4f}")
        print(f"Avg Proposals/Image: {best_metrics.avg_proposals_per_image:.2f}")
        print(f"Composite Score: {best_metrics.composite_score:.4f}")
        print()

        os.makedirs(output_dir, exist_ok=True)

        results = {
            'best_threshold': best_metrics.threshold,
            'best_metrics': best_metrics.to_dict(),
            'all_metrics': [m.to_dict() for m in all_metrics],
            'optimization_config': {
                'threshold_range': threshold_range,
                'num_points': num_points,
                'sample_size': sample_size,
                'recall_weight': self.recall_weight,
                'iou_weight': self.iou_weight,
                'count_penalty': self.count_penalty,
                'target_proposals_range': self.target_proposals_range
            }
        }

        # Persist the full sweep as JSON (metrics are plain floats/ints).
        results_file = os.path.join(output_dir, 'optimization_results.json')
        with open(results_file, 'w', encoding='utf-8') as f:
            json.dump(results, f, indent=2, ensure_ascii=False)

        print(f" Results saved to: {results_file}")

        # Diagnostic plots of the sweep.
        self._visualize_results(all_metrics, best_metrics, output_dir)

        # Human-readable recommendations.
        self._generate_recommendations(best_metrics, output_dir)

        return results

    def _visualize_results(
        self,
        all_metrics: List[ProposalQualityMetrics],
        best_metrics: ProposalQualityMetrics,
        output_dir: str
    ):
        """Plot the sweep (recall, IoU, counts, score, trade-off) to a PNG."""

        thresholds = [m.threshold for m in all_metrics]

        fig, axes = plt.subplots(2, 3, figsize=(18, 10))
        fig.suptitle('Proposal Threshold Optimization Results', fontsize=16, fontweight='bold')

        # 1. Recall @ 0.5
        ax = axes[0, 0]
        ax.plot(thresholds, [m.recall_at_05 for m in all_metrics], 'o-', linewidth=2, markersize=8)
        ax.axvline(best_metrics.threshold, color='red', linestyle='--', linewidth=2, label='Best')
        ax.set_xlabel('Threshold')
        ax.set_ylabel('Recall @ IoU=0.5')
        ax.set_title('Recall @ 0.5')
        ax.grid(True, alpha=0.3)
        ax.legend()

        # 2. Recall @ 0.75
        ax = axes[0, 1]
        ax.plot(thresholds, [m.recall_at_075 for m in all_metrics], 'o-', linewidth=2, markersize=8)
        ax.axvline(best_metrics.threshold, color='red', linestyle='--', linewidth=2, label='Best')
        ax.set_xlabel('Threshold')
        ax.set_ylabel('Recall @ IoU=0.75')
        ax.set_title('Recall @ 0.75')
        ax.grid(True, alpha=0.3)
        ax.legend()

        # 3. Mean Max IoU
        ax = axes[0, 2]
        ax.plot(thresholds, [m.mean_max_iou for m in all_metrics], 'o-', linewidth=2, markersize=8)
        ax.axvline(best_metrics.threshold, color='red', linestyle='--', linewidth=2, label='Best')
        ax.set_xlabel('Threshold')
        ax.set_ylabel('Mean Max IoU')
        ax.set_title('Mean Max IoU')
        ax.grid(True, alpha=0.3)
        ax.legend()

        # 4. Avg Proposals per Image (with the target band marked)
        ax = axes[1, 0]
        ax.plot(thresholds, [m.avg_proposals_per_image for m in all_metrics], 'o-', linewidth=2, markersize=8)
        ax.axvline(best_metrics.threshold, color='red', linestyle='--', linewidth=2, label='Best')
        ax.axhline(self.target_proposals_range[0], color='green', linestyle=':', alpha=0.5, label='Target Range')
        ax.axhline(self.target_proposals_range[1], color='green', linestyle=':', alpha=0.5)
        ax.set_xlabel('Threshold')
        ax.set_ylabel('Avg Proposals/Image')
        ax.set_title('Proposals per Image')
        ax.grid(True, alpha=0.3)
        ax.legend()

        # 5. Composite Score
        ax = axes[1, 1]
        ax.plot(thresholds, [m.composite_score for m in all_metrics], 'o-', linewidth=2, markersize=8)
        ax.axvline(best_metrics.threshold, color='red', linestyle='--', linewidth=2, label='Best')
        ax.set_xlabel('Threshold')
        ax.set_ylabel('Composite Score')
        ax.set_title('Composite Score (Higher is Better)')
        ax.grid(True, alpha=0.3)
        ax.legend()

        # 6. Trade-off: Recall vs Proposals, colored by mean max IoU
        ax = axes[1, 2]
        scatter = ax.scatter(
            [m.avg_proposals_per_image for m in all_metrics],
            [m.recall_at_05 for m in all_metrics],
            c=[m.mean_max_iou for m in all_metrics],
            s=150,
            cmap='viridis',
            edgecolors='black',
            linewidths=1.5
        )
        ax.scatter(
            best_metrics.avg_proposals_per_image,
            best_metrics.recall_at_05,
            c='red',
            s=300,
            marker='*',
            edgecolors='black',
            linewidths=2,
            label='Best',
            zorder=10
        )
        ax.axvline(self.target_proposals_range[0], color='green', linestyle=':', alpha=0.5)
        ax.axvline(self.target_proposals_range[1], color='green', linestyle=':', alpha=0.5)
        ax.set_xlabel('Avg Proposals/Image')
        ax.set_ylabel('Recall @ 0.5')
        ax.set_title('Recall vs Proposals Trade-off')
        ax.grid(True, alpha=0.3)
        ax.legend()
        plt.colorbar(scatter, ax=ax, label='Mean Max IoU')

        plt.tight_layout()

        plot_file = os.path.join(output_dir, 'optimization_curves.png')
        plt.savefig(plot_file, dpi=300, bbox_inches='tight')
        print(f" Visualization saved to: {plot_file}")

        plt.close()

    def _generate_recommendations(
        self,
        best_metrics: ProposalQualityMetrics,
        output_dir: str
    ):
        """Write RECOMMENDATIONS.md for the best threshold and echo it to stdout."""

        recommendations = []

        recommendations.append("# Proposal Threshold Optimization Recommendations\n")
        recommendations.append(f"## Best Threshold: {best_metrics.threshold:.4f}\n")
        recommendations.append("\n### Performance Metrics:")
        recommendations.append(f"- Recall @ IoU=0.5: {best_metrics.recall_at_05:.4f}")
        recommendations.append(f"- Recall @ IoU=0.75: {best_metrics.recall_at_075:.4f}")
        recommendations.append(f"- Mean Max IoU: {best_metrics.mean_max_iou:.4f}")
        recommendations.append(f"- Avg Proposals/Image: {best_metrics.avg_proposals_per_image:.2f}")
        recommendations.append(f"- Composite Score: {best_metrics.composite_score:.4f}\n")

        recommendations.append("\n### How to Apply:")
        recommendations.append("1. Update `src/config.py`:")
        recommendations.append("   ```python")
        recommendations.append("   STAGE1_CONFIG = {")
        recommendations.append(f"       'confidence_threshold': {best_metrics.threshold:.4f},")
        recommendations.append("       # ... other settings")
        recommendations.append("   }")
        recommendations.append("   ```\n")

        recommendations.append("2. Regenerate proposals:")
        recommendations.append("   ```bash")
        recommendations.append(f"   python main.py gen-proposals --conf-thresh {best_metrics.threshold:.4f}")
        recommendations.append("   ```\n")

        recommendations.append("3. Retrain Stage 2 with new proposals:")
        recommendations.append("   ```bash")
        recommendations.append("   python main.py train-stage2 --epochs 80")
        recommendations.append("   ```\n")

        # Qualitative analysis of the chosen operating point.
        recommendations.append("\n### Analysis:")

        if best_metrics.recall_at_05 < 0.8:
            recommendations.append(f"-  Recall is relatively low ({best_metrics.recall_at_05:.2%}). Consider:")
            recommendations.append("  - Using a lower threshold")
            recommendations.append("  - Improving Stage 1 training")
        else:
            recommendations.append(f"-  Good recall ({best_metrics.recall_at_05:.2%})")

        if best_metrics.avg_proposals_per_image < self.target_proposals_range[0]:
            recommendations.append(f"-  Low proposals count ({best_metrics.avg_proposals_per_image:.1f}). May miss detections.")
        elif best_metrics.avg_proposals_per_image > self.target_proposals_range[1]:
            recommendations.append(f"-  High proposals count ({best_metrics.avg_proposals_per_image:.1f}). May slow down Stage 2.")
        else:
            recommendations.append(f"-  Proposals count in target range ({best_metrics.avg_proposals_per_image:.1f})")

        if best_metrics.mean_max_iou < 0.6:
            recommendations.append(f"-  Low mean IoU ({best_metrics.mean_max_iou:.2f}). Stage 1 localization needs improvement.")
        else:
            recommendations.append(f"-  Good mean IoU ({best_metrics.mean_max_iou:.2f})")

        rec_file = os.path.join(output_dir, 'RECOMMENDATIONS.md')
        with open(rec_file, 'w', encoding='utf-8') as f:
            f.write('\n'.join(recommendations))

        print(f" Recommendations saved to: {rec_file}")
        print("\n" + "=" * 80)
        print(" Recommendations:")
        print("=" * 80)
        for line in recommendations:
            print(line)


def run_optimization(
    data_yaml: Optional[str] = None,
    stage1_weights: Optional[str] = None,
    threshold_range: Tuple[float, float] = (0.001, 0.1),
    num_points: int = 15,
    sample_size: Optional[int] = None,
    output_dir: str = 'proposal_threshold_optimization',
    recall_weight: float = 1.0,
    iou_weight: float = 0.8,
    count_penalty: float = 0.1,
    target_min: int = 20,
    target_max: int = 100,
    device: str = 'cuda'
):
    """Convenience entry point: build the optimizer and run the sweep.

    Args:
        data_yaml: Dataset YAML path; defaults to the project DATA_YAML.
        stage1_weights: Stage 1 weights path; defaults to
            STAGE1_CONFIG['weights_path'].
        threshold_range: (min, max) confidence thresholds to sweep.
        num_points: Number of thresholds tested within the range.
        sample_size: Optional validation subset size (None = all images).
        output_dir: Directory for JSON results, plots and recommendations.
        recall_weight: Weight of recall@0.5 in the composite score.
        iou_weight: Weight of mean max IoU in the composite score.
        count_penalty: Penalty factor for exceeding the proposal-count target.
        target_min: Lower bound of the desired proposals-per-image range.
        target_max: Upper bound of the desired proposals-per-image range.
        device: Inference device forwarded to the optimizer ('cuda' or 'cpu').

    Returns:
        The results dict produced by ProposalThresholdOptimizer.optimize().

    Raises:
        FileNotFoundError: If the data YAML or the weights file is missing.
    """

    # Fall back to project-level configuration when paths are not given.
    if data_yaml is None:
        data_yaml = DATA_YAML

    if stage1_weights is None:
        stage1_weights = STAGE1_CONFIG['weights_path']

    # Fail fast on missing inputs before loading any model.
    if not os.path.exists(data_yaml):
        raise FileNotFoundError(f"Data YAML not found: {data_yaml}")

    if not os.path.exists(stage1_weights):
        raise FileNotFoundError(f"Stage 1 weights not found: {stage1_weights}")

    optimizer = ProposalThresholdOptimizer(
        data_yaml=data_yaml,
        stage1_weights=stage1_weights,
        device=device,
        recall_weight=recall_weight,
        iou_weight=iou_weight,
        count_penalty=count_penalty,
        target_proposals_range=(target_min, target_max)
    )

    results = optimizer.optimize(
        threshold_range=threshold_range,
        num_points=num_points,
        sample_size=sample_size,
        output_dir=output_dir
    )

    return results


if __name__ == '__main__':
    import argparse

    # CLI wrapper around run_optimization().
    parser = argparse.ArgumentParser(description='Optimize Proposal Threshold')
    parser.add_argument('--data-yaml', type=str, help='Path to data.yaml')
    parser.add_argument('--stage1-weights', type=str, help='Path to Stage 1 weights')
    parser.add_argument('--threshold-min', type=float, default=0.001, help='Min threshold')
    parser.add_argument('--threshold-max', type=float, default=0.1, help='Max threshold')
    parser.add_argument('--num-points', type=int, default=15, help='Number of test points')
    parser.add_argument('--sample-size', type=int, help='Validation sample size')
    parser.add_argument('--output-dir', type=str, default='proposal_threshold_optimization')
    parser.add_argument('--target-min', type=int, default=20, help='Target min proposals/image')
    parser.add_argument('--target-max', type=int, default=100, help='Target max proposals/image')
    # Composite-score weights (defaults match run_optimization).
    parser.add_argument('--recall-weight', type=float, default=1.0, help='Recall weight in composite score')
    parser.add_argument('--iou-weight', type=float, default=0.8, help='Mean IoU weight in composite score')
    parser.add_argument('--count-penalty', type=float, default=0.1, help='Penalty for exceeding target proposal count')

    args = parser.parse_args()

    run_optimization(
        data_yaml=args.data_yaml,
        stage1_weights=args.stage1_weights,
        threshold_range=(args.threshold_min, args.threshold_max),
        num_points=args.num_points,
        sample_size=args.sample_size,
        output_dir=args.output_dir,
        recall_weight=args.recall_weight,
        iou_weight=args.iou_weight,
        count_penalty=args.count_penalty,
        target_min=args.target_min,
        target_max=args.target_max
    )
