import torch
import os
from dotenv import load_dotenv

load_dotenv()

# Pick the compute device once at import time: GPU when available.
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'

# Project root is two directory levels above this config file.
PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


def _project_path(*parts):
    """Join *parts* onto PROJECT_ROOT (internal helper)."""
    return os.path.join(PROJECT_ROOT, *parts)


# Standard project sub-directories and files used across the pipeline.
DATASET_DIR = _project_path("dataset")
WEIGHTS_DIR = _project_path("weights")
PROPOSALS_DIR = _project_path("proposals")
RUNS_DIR = _project_path("runs")
DATA_YAML = os.path.join(DATASET_DIR, "data.yaml")

# Output directories are created eagerly so downstream code may assume
# they exist.
for _out_dir in (WEIGHTS_DIR, PROPOSALS_DIR):
    os.makedirs(_out_dir, exist_ok=True)

# Stage 1 (proposal generator) training / inference settings.
STAGE1_CONFIG = dict(
    model_name='yolov10n.pt',
    weights_path=os.path.join(WEIGHTS_DIR, "stage1_best.pt"),  # Unified naming: stage1_best.pt
    data_yaml=DATA_YAML,
    epochs=50,
    batch_size=16,
    img_size=640,
    tile_size=640,
    tile_overlap=100,
    # Deliberately low so Stage 2 receives as many proposals as possible.
    confidence_threshold=0.01,
)

# ===========================================================================
# Stage 2 Configuration - Unified Naming Convention
# ===========================================================================
# Checkpoint naming format:
#   - Main model: stage2_best_val_acc.pth, stage2_best_val_loss.pth, stage2_final.pth
#   - EMA model: stage2_best_val_acc_ema.pth, stage2_final_ema.pth
#   - SWA model: stage2_final_swa.pth
#
# Stage 2 training uses unified script:
#   - train_stage2.py (all features, supports multiple backbones)
#   - _deprecated_train_stage2_unified.py (old script, kept for reference)
# ===========================================================================

# Stage 2 (ROI refiner) training settings.  Checkpoint files follow the
# unified naming convention described above.
STAGE2_CONFIG = dict(
    weights_path=os.path.join(WEIGHTS_DIR, "stage2_best_val_acc.pth"),  # Unified naming
    proposals_json=os.path.join(PROPOSALS_DIR, "proposals.json"),
    epochs=80,
    batch_size=8,
    learning_rate=1e-4,
    roi_size=224,
    positive_iou_thresh=0.5,
    negative_iou_thresh=0.3,

    # Feature extractor: how many trailing layers to unfreeze
    # (0 = freeze all, -1 = unfreeze all, n > 0 = unfreeze last n layers).
    unfreeze_layers=2,

    # Loss weighting.
    bbox_loss_weight=2.0,  # BBox regression loss weight

    # Weight smoothing (EMA / SWA).
    use_ema=True,
    ema_decay=0.9999,
    use_swa=True,
    swa_start_ratio=0.75,

    # Optimizer and learning-rate schedule.
    warmup_epochs=5,
    weight_decay=1e-4,

    # Loss-function options.
    use_ohem=True,
    ohem_ratio=0.7,
    use_focal_loss=True,
    focal_alpha=0.25,
    focal_gamma=2.0,

    # Training techniques.
    gradient_clip=1.0,
    early_stopping_patience=15,
)

# Inference-server settings: bind address, detection thresholds, and the
# weight files each stage loads.
SERVER_CONFIG = dict(
    host="0.0.0.0",
    port=8000,
    refiner_confidence_threshold=0.25,  # Lowered threshold
    nms_iou_threshold=0.45,
    use_tta=False,
    tta_scales=3,

    stage1_weights=STAGE1_CONFIG['weights_path'],
    stage2_weights=STAGE2_CONFIG['weights_path'],
    stage2_ema_weights=os.path.join(WEIGHTS_DIR, "stage2_best_val_acc_ema.pth"),  # Unified naming
    proposer_confidence_threshold=0.01,  # Lowered threshold
)

# Vision-language model access settings; the API key is read from the
# environment (populated from .env by load_dotenv above).
VLM_CONFIG = dict(
    gemini_api_key=os.getenv("GEMINI_API_KEY"),
    model_name="gemini-pro-vision",
)

# Base model architecture: backbone identifiers and channel widths.
MODEL_CONFIG = dict(
    dino_model_name="dinov2_vits14",
    dino_out_channels=384,
    overlock_out_channels=640,
    fusion_out_channels=512,
)

# Improved architecture configuration.
IMPROVED_MODEL_CONFIG = dict(
    # Backbone selection (NEW).  Overridable via the environment, e.g.
    # `export PRISM_BACKBONE=resnet50`; use "resnet50" if OverLock has
    # CUDA issues.  Options: "overlock", "resnet50".
    backbone=os.getenv("PRISM_BACKBONE", "overlock"),

    # Dynamic attention fusion (enabled by default).
    use_dynamic_fusion=True,
    fusion_use_channel_attention=True,   # Channel attention
    fusion_use_spatial_attention=True,   # Spatial attention
    fusion_use_cross_attention=True,     # Cross attention
    fusion_reduction_ratio=16,           # Reduction ratio for channel attention
    fusion_spatial_kernel_size=7,        # Kernel size for spatial attention

    # Cross-ROI self-attention (enabled by default).
    use_cross_roi_attention=True,
    cross_roi_num_heads=8,               # Number of attention heads
    cross_roi_dropout=0.1,               # Dropout rate
    cross_roi_position_embed_dim=128,    # Position embedding dimension (customizable)
    cross_roi_use_relative_pos=True,     # Use relative position encoding
    cross_roi_max_rois=256,              # Maximum number of ROIs (may OOM if exceeded)

    # Multi-head attention diversity loss to prevent head collapse
    # (disabled by default; enable as needed).
    use_diversity_loss=False,
    diversity_loss_type="cosine",        # One of: 'cosine', 'kl', 'orthogonal'
    diversity_loss_weight=0.01,          # Weight relative to the main loss

    # FPN + RoIAlign: advanced feature extraction (disabled by default,
    # requires a data-flow redesign).
    use_fpn=False,
    fpn_in_channels=[128, 384, 640, 640],  # OverLoCK channel dims per layer [C2,C3,C4,C5]
    fpn_out_channels=256,                # Unified FPN output channels
    fpn_use_p2=True,                     # Use P2 (high resolution, computationally expensive)
    fpn_levels=[2, 3, 4, 5],             # FPN levels to use

    use_roi_align=False,                 # Disabled by default, works with FPN
    roi_align_output_size=7,             # RoIAlign output size
    roi_align_sampling_ratio=2,          # RoIAlign sampling ratio (-1 = adaptive)
    roi_align_canonical_scale=224,       # Canonical scale for assigning ROIs to FPN levels
    roi_align_canonical_level=4,         # FPN level corresponding to the canonical scale

    # Feature dimensions.
    overlock_channels=640,
    dino_channels=384,
    fusion_channels=512,
)

# Adapter between the OverLoCK backbone and the YOLO detection head.
ADAPTER_CONFIG = dict(
    overlock_channels=[128, 384, 640],     # OverLoCK output channels [P3, P4, P5]
    yolo_head_channels=[256, 512, 1024],   # YOLO head expected input channels
)