#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
train_baseline.py (stabilized)
- 读取你的 baseline 配置（包含 img_root/xml_root/train_list/test_list）
- 自动按实际 batch size 缩放 LR
- 自动在训练 pipeline 注入 FilterAnnotations 防止退化框导致 NaN
- 支持 --cfg-options 临时覆盖配置
"""

import argparse
import os, os.path as osp, time, copy, warnings
import mmcv
from mmcv import Config, DictAction
from mmcv.runner import init_dist, get_dist_info
from mmdet.apis import set_random_seed, train_detector
from mmdet.datasets import build_dataset
from mmdet.models import build_detector

# Make sure the custom dataset class is registered with mmdet.
import mmdet.datasets.thyroid_voc  # noqa: F401

def parse_args():
    """Parse command-line arguments for baseline training.

    Returns:
        argparse.Namespace: parsed arguments.

    Side effect: exports LOCAL_RANK so torch.distributed launchers that
    rely on the environment variable keep working.
    """
    parser = argparse.ArgumentParser(description="Train Faster R-CNN baseline (no OA-DG).")
    # required=True yields a clear argparse error instead of a confusing
    # Config.fromfile(None) failure later in main().
    parser.add_argument("--config", required=True, help="Path to your baseline config file.")
    parser.add_argument("--work-dir", default=None, help="Directory to save logs and checkpoints.")
    parser.add_argument("--resume-from", default=None, help="Checkpoint to resume from.")
    parser.add_argument("--no-validate", action="store_true", help="Disable evaluation during training.")
    parser.add_argument("--seed", type=int, default=None, help="Random seed.")
    parser.add_argument("--deterministic", action="store_true", help="Use deterministic CUDNN (slower).")
    parser.add_argument("--launcher", choices=["none", "pytorch", "slurm", "mpi"], default="none")
    parser.add_argument("--local_rank", type=int, default=0)
    # Allow overriding arbitrary cfg keys from the command line, e.g.:
    #   --cfg-options optimizer.lr=0.0025 data.samples_per_gpu=2
    parser.add_argument("--cfg-options", nargs="+", action=DictAction, help="Override config keys: key=value.")
    # Escape hatch to turn off automatic LR scaling.
    parser.add_argument("--no-autoscale-lr", action="store_true", help="Disable auto LR scaling by batch size.")
    args = parser.parse_args()
    if "LOCAL_RANK" not in os.environ:
        os.environ["LOCAL_RANK"] = str(args.local_rank)
    return args

def _autoscale_lr(cfg, world_size, disabled=False):
    if disabled:
        return
    # 优先使用配置里的 base_batch_size；否则用 16（MMDet 标配）
    base_bs = 16
    if cfg.get("auto_scale_lr", None) and isinstance(cfg.auto_scale_lr, dict):
        base_bs = int(cfg.auto_scale_lr.get("base_batch_size", base_bs))
    # 取 samples_per_gpu（优先 data.train，否则 data 根上）
    spg = None
    if isinstance(cfg.data.get("train", {}), dict):
        spg = cfg.data.get("samples_per_gpu", None)  # 有的配置在 data 顶层
        if spg is None:
            spg = cfg.data.train.get("samples_per_gpu", None)
    if spg is None:
        # 兜底：很多 1x 配置默认 2
        spg = 2
    actual_bs = int(world_size) * int(spg)
    if actual_bs <= 0:
        return
    scale = actual_bs / float(base_bs)
    if "optimizer" in cfg and isinstance(cfg.optimizer, dict) and "lr" in cfg.optimizer:
        old_lr = float(cfg.optimizer["lr"])
        new_lr = old_lr * scale
        cfg.optimizer["lr"] = new_lr
        mmcv.print_log(f"[auto_scale_lr] base_bs={base_bs}, actual_bs={actual_bs}, "
                       f"scale={scale:.4f}, lr: {old_lr} -> {new_lr}", "mmcv")

def _inject_filter_annotations(cfg):
    """在 train pipeline 里注入 FilterAnnotations，过滤 w/h<=0 的框，避免编码 ln(0) -> NaN。"""
    try:
        tp = cfg.data.train.pipeline
        has_filter = any(isinstance(s, dict) and s.get("type") == "FilterAnnotations" for s in tp)
        if not has_filter:
            new_tp = []
            inserted = False
            for step in tp:
                new_tp.append(step)
                # 在 LoadAnnotations 之后插入
                if isinstance(step, dict) and step.get("type") == "LoadAnnotations" and not inserted:
                    new_tp.append(dict(type="FilterAnnotations",
                                       min_gt_bbox_wh=(1, 1),
                                       keep_empty=False))
                    inserted = True
            if not inserted:
                # 没找到 LoadAnnotations，就插在最前面（次优，但总比没有强）
                new_tp = [dict(type="FilterAnnotations",
                               min_gt_bbox_wh=(1, 1),
                               keep_empty=False)] + new_tp
            cfg.data.train.pipeline = new_tp
        # 开启过滤无标注图片（部分 xml 为空或全被过滤时）
        if isinstance(cfg.data.train, dict) and "filter_empty_gt" not in cfg.data.train:
            cfg.data.train["filter_empty_gt"] = True
    except Exception as e:
        warnings.warn(f"[warn] injecting FilterAnnotations failed: {e}")

def main():
    """Entry point: build config, model and dataset, then launch training."""
    args = parse_args()
    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)

    # Work directory priority: CLI flag > config value > derived from filename.
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    elif cfg.get("work_dir", None) is None:
        cfg.work_dir = osp.join("./work_dirs", osp.splitext(osp.basename(args.config))[0] + "_baseline")

    if args.resume_from is not None:
        cfg.resume_from = args.resume_from

    # Seeding (optional full determinism at the cost of speed).
    if args.seed is not None:
        cfg.seed = args.seed
        set_random_seed(args.seed, deterministic=args.deterministic)

    # Distributed setup.
    if args.launcher == "none":
        distributed = False
        cfg.gpu_ids = range(1)
        world_size = 1
    else:
        distributed = True
        init_dist(args.launcher, **cfg.get("dist_params", dict(backend="nccl")))
        _, world_size = get_dist_info()
        cfg.gpu_ids = range(world_size)

    # === Stability tweaks ===
    _autoscale_lr(cfg, world_size, disabled=args.no_autoscale_lr)
    _inject_filter_annotations(cfg)

    # Datasets: as in upstream mmdet, only train goes here; validation is
    # driven by the EvalHook.
    datasets = [build_dataset(cfg.data.train)]

    # Model.
    model = build_detector(cfg.model, train_cfg=cfg.get("train_cfg"), test_cfg=cfg.get("test_cfg"))
    model.init_weights()

    # === Keep only the best checkpoint ===
    # BUGFIX: in MMDetection 2.x, `save_best`/`rule` are EvalHook options and
    # belong in cfg.evaluation, NOT in checkpoint_config. CheckpointHook
    # forwards unknown kwargs to runner.save_checkpoint(), which raises a
    # TypeError at the first checkpoint if they are placed here.
    cfg.checkpoint_config = dict(
        interval=1,        # checkpoint every epoch
        max_keep_ckpts=1,  # keep at most one regular checkpoint
    )
    evaluation = dict(cfg.get("evaluation", None) or {})
    # 'auto' tracks the first metric the dataset reports (VOC-style datasets
    # report 'mAP', COCO-style 'bbox_mAP' — presumably this custom thyroid
    # VOC dataset reports 'mAP'; 'auto' covers both). Higher is better.
    evaluation.setdefault("save_best", "auto")
    evaluation.setdefault("rule", "greater")
    cfg.evaluation = evaluation

    # Launch training.
    validate = not args.no_validate
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    timestamp = time.strftime("%Y%m%d_%H%M%S", time.localtime())

    train_detector(
        model,
        datasets,
        cfg,
        distributed=distributed,
        validate=validate,
        timestamp=timestamp,
        meta=dict(seed=cfg.get("seed"), exp_name=osp.basename(args.config)),
    )

if __name__ == "__main__":
    main()
