import os
import sys

# Absolute path of this script; used to locate the project roots below.
res = os.path.abspath(__file__)
# Two directory levels up from this file -- the project root.
base_path = os.path.dirname(os.path.dirname(res))
sys.path.insert(1, base_path)

# One level above the project root, so sibling packages are importable too.
base_path_pre = os.path.dirname(base_path)
sys.path.insert(1, base_path_pre)

import argparse
import copy
import os
import os.path as osp
import time
import warnings

# os.environ['CUDA_VISIBLE_DEVICES']="1"

import mmcv
import torch
import torch.distributed as dist
from mmcv import Config, DictAction
from mmcv.runner import get_dist_info, init_dist
from mmcv.utils import get_git_hash

from mmdet import __version__
from mmdet.apis import init_random_seed, set_random_seed, train_detector
from mmdet.datasets import build_dataset
from mmdet.models import build_detector
from mmdet.utils import (collect_env, get_device, get_root_logger,
                         replace_cfg_vals, setup_multi_processes,
                         update_data_root)
import mmcv_custom  # noqa: F401,F403
import mmdet_custom  # noqa: F401,F403

from mmdet_custom.models import DINOHead

def parse_args():
    """Parse command-line arguments for detector training.

    Returns:
        argparse.Namespace: Parsed arguments. The deprecated ``--options``
        is folded into ``--cfg-options`` before returning, and
        ``LOCAL_RANK`` is exported to the environment for distributed
        launchers that read it (e.g. ``torch.distributed.launch``).

    Raises:
        ValueError: If both ``--options`` and ``--cfg-options`` are given.
    """
    parser = argparse.ArgumentParser(description='Train a detector')
    # NOTE(review): the default points at a user-specific absolute path;
    # consider making --config a required positional argument instead.
    parser.add_argument('--config', help='train config file path', default="/mnt/pde/algorithm/user/qxu/project/InternImage/allinone_new/configs/coco/all_in_one.py")
    parser.add_argument('--work-dir', help='the dir to save logs and models')
    parser.add_argument('--resume-from',
                        help='the checkpoint file to resume from')
    parser.add_argument('--auto-resume',
                        action='store_true',
                        help='resume from the latest checkpoint automatically')
    parser.add_argument(
        '--no-validate',
        action='store_true',
        help='whether not to evaluate the checkpoint during training')
    group_gpus = parser.add_mutually_exclusive_group()
    group_gpus.add_argument(
        '--gpus',
        type=int,
        help='(Deprecated, please use --gpu-id) number of gpus to use '
        '(only applicable to non-distributed training)')
    group_gpus.add_argument(
        '--gpu-ids',
        type=int,
        nargs='+',
        help='(Deprecated, please use --gpu-id) ids of gpus to use '
        '(only applicable to non-distributed training)')
    group_gpus.add_argument('--gpu-id',
                            type=int,
                            default=0,
                            help='id of gpu to use '
                            '(only applicable to non-distributed training)')
    parser.add_argument('--seed', type=int, default=None, help='random seed')
    parser.add_argument(
        '--diff-seed',
        action='store_true',
        help='Whether or not set different seeds for different ranks')
    parser.add_argument(
        '--deterministic',
        action='store_true',
        help='whether to set deterministic options for CUDNN backend.')
    parser.add_argument(
        '--options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file (deprecate), '
        'change to --cfg-options instead.')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    parser.add_argument('--launcher',
                        choices=['none', 'pytorch', 'slurm', 'mpi'],
                        default='pytorch',
                        help='job launcher')
    # Default to rank 0 so that str(args.local_rank) below never writes the
    # literal string "None" into the environment, which would break any
    # downstream int(os.environ['LOCAL_RANK']) parsing.
    parser.add_argument('--local_rank', type=int, default=0)
    parser.add_argument('--auto-scale-lr',
                        action='store_true',
                        help='enable automatically scaling LR.')
    args = parser.parse_args()
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)

    if args.options and args.cfg_options:
        raise ValueError(
            '--options and --cfg-options cannot be both '
            'specified, --options is deprecated in favor of --cfg-options')
    if args.options:
        warnings.warn('--options is deprecated in favor of --cfg-options')
        args.cfg_options = args.options

    return args


def main():
    """Build the detection dataset from the config and smoke-test the
    training dataloader by iterating over it and printing every batch.

    This is a debugging entry point: it uses a single-replica
    DistributedSampler (num_replicas=1, rank=0) and batch size 1, so no
    real distributed setup or model training happens here.
    """
    args = parse_args()

    cfg = Config.fromfile(args.config)

    # Replace the ${key} placeholders with the value of cfg.key.
    cfg = replace_cfg_vals(cfg)
    print(cfg)  # debug: dump the fully resolved config

    # Update data root according to the MMDET_DATASETS env var.
    update_data_root(cfg)

    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)

    # Set multi-process settings (OpenCV/OMP/MKL thread counts, etc.).
    setup_multi_processes(cfg)

    # Build the detection training dataset described by the config.
    # NOTE(review): presumably a CocoDataset-style dataset -- depends on
    # cfg.data.detection.train.type; verify against the config file.
    datasets = [build_dataset(cfg.data.detection.train)]
    dataset_train = datasets[0]

    # Single-process sampler: the whole dataset is assigned to "rank 0".
    sampler_train = torch.utils.data.DistributedSampler(
        dataset_train, num_replicas=1, rank=0, shuffle=True)

    # Local imports keep heavy/optional dependencies out of module import.
    from functools import partial

    from mmcv.parallel import collate
    from torch.utils.data import DataLoader

    data_loader_train = DataLoader(
        dataset_train,
        sampler=sampler_train,
        batch_size=1,
        num_workers=0,
        collate_fn=partial(collate, samples_per_gpu=1),
        pin_memory=False,
        drop_last=True,
    )
    # Iterate once over the loader, printing each batch as a smoke test.
    for idx, batch in enumerate(data_loader_train):
        print(idx, batch)

    
   
# Script entry point: run the dataloader smoke test when executed directly.
if __name__ == '__main__':
    main()