from mmengine.registry import PARAM_SCHEDULERS
from typing import Callable, Dict, List, Optional, Sequence, Union
from mmengine.optim import (OptimWrapper, OptimWrapperDict, _ParamScheduler,
                            build_optim_wrapper)
from mmengine.utils import apply_to, digit_version, get_git_hash, is_seq_of
from pprint import pprint
from mmengine.optim import OptimWrapper
from mmengine.runner.base_loop import BaseLoop
from mmengine.optim import build_optim_wrapper
from backbone.vit_win_rvsa_v3_wsz7 import vit_b_rvsa
from mmcv_custom.layer_decay_optimizer_constructor_vit import *

import argparse

import copy

# Result type of `build_param_scheduler`: a flat list of schedulers when a
# single optimizer (plain `OptimWrapper`) is used, or a mapping from optimizer
# name to its scheduler list when an `OptimWrapperDict` holds several.
ParamSchedulerType = Union[List[_ParamScheduler], Dict[str,
                                                       List[_ParamScheduler]]]

def get_parse_args():
    """Build and parse the command-line arguments for multi-task training.

    Returns:
        argparse.Namespace: Parsed arguments from ``sys.argv``.
    """
    parser = argparse.ArgumentParser(description='PyTorch MutliTask', add_help=False)
    parser.add_argument('--backbone', type=str, default='vit_b_rvsa', choices=['vit_b_rvsa', 'vit_l_rvsa', 'internimage_xl', 'vitaev2_s','vit_b', 'vit_l',], help='backbone name')
    parser.add_argument('--datasets', type=str, nargs='+', help='used datasets')
    parser.add_argument('--tasks', type=str, nargs='+', help='used tasks')
    # epoch / iteration bookkeeping
    parser.add_argument('--start_epoch', type=int, default=0, help='index of start epoch')
    parser.add_argument('--start_iter', type=int, default=0, help='index of start iteration')
    parser.add_argument('--end_iter', type=int, default=30000, help='number of iterations to train')
    # batch size
    parser.add_argument('--batch_size', type=int, default=2, help='input batch size for training')
    parser.add_argument('--batch_size_val', type=int, default=1, help='input batch size for validation')
    parser.add_argument('--workers', type=int, default=4, help='workers num')
    parser.add_argument('--batch_mode', type=str, default='avg', choices=['ratio','avg'], help='how to assign batch size')
    # learning rate
    parser.add_argument('--lr', type=float, default=None, help='actual learning rate')
    parser.add_argument('--weight_decay', type=float, default=0.05, help='weight decay (default: 0.05)')
    # distributed
    parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
    parser.add_argument('--distributed', type=str, default='True', choices=['True', 'False'], help='distributed training')
    parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
    parser.add_argument('--local_rank', type=int, default=0)
    parser.add_argument('--gpu', type=int, default=0)

    # finetune / resume
    parser.add_argument('--ft', type=str, default='False', choices=['True', 'False'], help='finetune model')
    parser.add_argument('--resume', type=str, default=None, help='path of checkpoint to resume from')
    # save
    parser.add_argument('--save_path', type=str, default='./rdonly_full', help='path of saving model')
    # ignored label
    parser.add_argument('--ignore_label', type=int, default=255, help='ignore index of loss')
    # validation interval
    parser.add_argument('--interval', type=int,  default=2000, help='valid interval')
    # backbone initialization scheme
    parser.add_argument('--init_backbone', type=str, default='mae', choices=['imp', 'rsp', 'none', 
                                                                            'mae', 'beit'], help='init model')
    # distributed master port
    parser.add_argument('--port', type=str, default=None, help='master ports')
    # NOTE(review): help text looked copy-pasted from --port; purpose of
    # --interact is not evident from this file — confirm with callers.
    parser.add_argument('--interact', type=str, default='None', help='master ports')

    # input img size
    parser.add_argument('--image_size', type=int, default=1024, help='image size')
    # background class handling
    parser.add_argument('--background', type=str, default='True', choices=['True', 'False'], help='consider background')
    # gradient checkpointing (memory-saving recomputation)
    parser.add_argument('--use_ckpt', type=str, default='False', choices=['True', 'False'], help='use gradient checkpointing')
    # mixed precision training
    parser.add_argument('--mixed_precision', type=str, default='True', choices=['True', 'False'], help='use mixed precision training')
    parser.add_argument('--seed', type=int, default=1234, help='random seeds')
    parser.add_argument('--max_epoch', type=int, default=100, help='maximum number of epochs')
    parser.add_argument('--finetune', type=str, default=None)

    return parser.parse_args()

def _check_scheduler_cfg(
        param_scheduler: Optional[Union[dict, list,
                                                _ParamScheduler]]) -> None:
    """Validate that `param_scheduler` is a well-formed scheduler config.

    Accepted forms: ``None`` (no scheduler), a built ``_ParamScheduler``
    instance, a sequence of instances, a sequence of config dicts (each must
    contain ``type``), or a `dict` that is either a single scheduler config
    (contains ``type``) or a mapping from optimizer name to a scheduler
    config / list of configs / instance. See :meth:`build_param_scheduler`
    for how each form is consumed.

    Examples:
        >>> # valid scheduler:
        >>> # empty scheduler
        >>> scheduler = None
        >>> # Single scheduler
        >>> scheduler = dict(type='MultiStepLR', milestones=[1, 2])
        >>> # Single list schedulers
        >>> scheduler = [dict(type='MultiStepLR', milestones=[1, 2]),
        >>>              dict(type='MultiStepLR', milestones=[2, 3])]
        >>> # `dict` of schedulers
        >>> scheduler = dict(linear1=dict(type='MultiStepLR', milestones=[1, 2]),
        >>>                  linear2=dict(type='MultiStepLR', milestones=[1, 2]))
        >>> # `dict` of `list` of schedulers
        >>> scheduler = dict(linear1=[dict(type='MultiStepLR', milestones=[1, 2])],
        >>>                  linear2=[dict(type='MultiStepLR', milestones=[1, 2])])
        >>> # Single built scheduler
        >>> from mmengine.optim import MultiStepLR
        >>> scheduler = MultiStepLR(milestones=[1, 2], optimizer=optimizer)
        >>> # Single built list schedulers
        >>> scheduler = [MultiStepLR(milestones=[1, 2], optimizer=optimizer)]
        >>> # dict of built scheduler
        >>> scheduler = dict(linear1=MultiStepLR(milestones=[1, 2], optimizer=optimizer),
        >>>                  linear2=MultiStepLR(milestones=[1, 2], optimizer=optimizer))
        >>> # dict of built list schedulers
        >>> scheduler = dict(linear1=[MultiStepLR(milestones=[1, 2], optimizer=optimizer)],
        >>>                  linear2=[MultiStepLR(milestones=[1, 2], optimizer=optimizer)])

    Args:
        param_scheduler (dict or list, optional): The original parameter
            scheduler config to validate.

    Raises:
        AssertionError: If a config dict is malformed.
        TypeError: If `param_scheduler` is not one of the accepted types.
    """  # noqa: E501
    # Nothing to validate / already-built instances are trusted as-is.
    if param_scheduler is None:
        return
    if isinstance(param_scheduler, _ParamScheduler):
        return
    if is_seq_of(param_scheduler, _ParamScheduler):
        return

    if is_seq_of(param_scheduler, dict):
        # A list of configs for a single optimizer: every entry needs `type`.
        for _param_scheduler in param_scheduler:
            assert 'type' in _param_scheduler, (
                'Each parameter scheduler should contain the key type, '
                f'but got {_param_scheduler}')
    elif isinstance(param_scheduler, dict):
        if 'type' not in param_scheduler:
            # Multi-optimizer form: each value is a scheduler spec for the
            # optimizer of the same name.
            for key, _param_scheduler in param_scheduler.items():
                assert isinstance(
                    _param_scheduler,
                    (dict, tuple, list, _ParamScheduler)), (
                        'Each value of `param_scheduler` should be a '
                        f'dict or a list, but got {_param_scheduler} with '
                        # BUGFIX: report the type of the offending value,
                        # not the type of the `_ParamScheduler` class.
                        f'type {type(_param_scheduler)}')

    else:
        raise TypeError(
            '`param_scheduler` should be a `_ParamScheduler`, `dict`, '
            f'list or a tuple, but got {type(param_scheduler)}. If '
            '`param_scheduler` is a list of dict, it means a list of '
            'scheduler configs for single optimizer. If it is a dict and '
            'contains key `type`, it means a scheduler config for a '
            'single optimizer. If it does not contain key `type`, it '
            'means multiple lists of schedulers for multiple optimizers.')

def build_param_scheduler(optim_wrapper, 
                          scheduler: Union[_ParamScheduler, Dict, List],
                          train_dataloader) -> ParamSchedulerType:
    """Build parameter schedulers.

    ``build_param_scheduler`` should be called after
    ``build_optim_wrapper`` because the building logic will change
    according to the number of optimizers built by the runner.
    The cases are as below:

    - Single optimizer: When only one optimizer is built and used in the
        runner, ``build_param_scheduler`` will return a list of
        parameter schedulers.
    - Multiple optimizers: When two or more optimizers are built and used
        in runner, ``build_param_scheduler`` will return a dict containing
        the same keys with multiple optimizers and each value is a list of
        parameter schedulers. Note that, if you want different optimizers to
        use different parameter schedulers to update optimizer's
        hyper-parameters, the input parameter ``scheduler`` also needs to be
        a dict and its key are consistent with multiple optimizers.
        Otherwise, the same parameter schedulers will be used to update
        optimizer's hyper-parameters.

    Args:
        optim_wrapper (OptimWrapper or OptimWrapperDict): The built
            optimizer wrapper(s) the schedulers will drive.
        scheduler (_ParamScheduler or dict or list): A Param Scheduler
            object or a dict or list of dict to build parameter schedulers.
        train_dataloader: Dataloader whose length is used as the epoch
            length when converting epoch-based schedulers.

    Examples:
        >>> # build one scheduler
        >>> optim_cfg = dict(dict(type='SGD', lr=0.01))
        >>> runner.optim_wrapper = runner.build_optim_wrapper(
        >>>     optim_cfg)
        >>> scheduler_cfg = dict(type='MultiStepLR', milestones=[1, 2])
        >>> schedulers = runner.build_param_scheduler(scheduler_cfg)
        >>> schedulers
        [<mmengine.optim.scheduler.lr_scheduler.MultiStepLR at 0x7f70f6966290>]  # noqa: E501

        >>> # build multiple schedulers
        >>> scheduler_cfg = [
        ...    dict(type='MultiStepLR', milestones=[1, 2]),
        ...    dict(type='StepLR', step_size=1)
        ... ]
        >>> schedulers = runner.build_param_scheduler(scheduler_cfg)
        >>> schedulers
        [<mmengine.optim.scheduler.lr_scheduler.MultiStepLR at 0x7f70f60dd3d0>,  # noqa: E501
        <mmengine.optim.scheduler.lr_scheduler.StepLR at 0x7f70f6eb6150>]

    Above examples only provide the case of one optimizer and one scheduler
    or multiple schedulers. If you want to know how to set parameter
    scheduler when using multiple optimizers, you can find more examples
    `optimizer-docs`_.

    Returns:
        list[_ParamScheduler] or dict[str, list[_ParamScheduler]]: List of
        parameter schedulers or a dictionary contains list of parameter
        schedulers build from ``scheduler``.

    .. _optimizer-docs:
        https://mmengine.readthedocs.io/en/latest/tutorials/optim_wrapper.html
    """
    param_schedulers: ParamSchedulerType

    if not isinstance(optim_wrapper, OptimWrapperDict):
        # Since `OptimWrapperDict` inherits from `OptimWrapper`,
        # `isinstance(optim_wrapper, OptimWrapper)` cannot tell whether
        # `optim_wrapper` is an `OptimWrapper` or `OptimWrapperDict`
        # instance. Therefore, here we simply check that `optim_wrapper`
        # is not an `OptimWrapperDict` instance and then assert it is an
        # `OptimWrapper` instance.
        assert isinstance(optim_wrapper, OptimWrapper), (
            '`build_optimizer` should be called before'
            '`build_param_scheduler` because the latter depends '
            'on the former')
        param_schedulers = _build_param_scheduler(
            scheduler, optim_wrapper, train_dataloader)  # type: ignore
        return param_schedulers
    else:
        param_schedulers = dict()
        for name, optimizer in optim_wrapper.items():
            if isinstance(scheduler, dict) and 'type' not in scheduler:
                # scheduler is a dict and each item is a ParamScheduler
                # object or a config to build ParamScheduler objects
                # BUGFIX: `_build_param_scheduler` requires the
                # `train_dataloader` argument; it was missing here and
                # raised a TypeError for multi-optimizer setups.
                param_schedulers[name] = _build_param_scheduler(
                    scheduler[name], optimizer, train_dataloader)
            else:
                param_schedulers[name] = _build_param_scheduler(
                    scheduler, optimizer, train_dataloader)

    return param_schedulers

def _build_param_scheduler(
        scheduler: Union[_ParamScheduler, Dict, List],
        optim_wrapper: OptimWrapper,
        train_dataloader) -> List[_ParamScheduler]:
    """Build parameter schedulers for a single optimizer.

    Args:
        scheduler (_ParamScheduler or dict or list): A Param Scheduler
            object or a dict or list of dict to build parameter schedulers.
        optim_wrapper (OptimWrapper): An optimizer wrapper object is
            passed to construct ParamScheduler object.
        train_dataloader: Sized dataloader; ``len(train_dataloader)`` is
            forwarded as ``epoch_length``. May be ``None`` when the epoch
            length is unknown (e.g. iter-based-only schedulers).

    Returns:
        list[_ParamScheduler]: List of parameter schedulers build from
        ``scheduler``.

    Raises:
        TypeError: If an entry is neither a `_ParamScheduler` nor a dict.
    """
    # Normalize to a list so single configs/instances share one code path.
    # Note: a dict is not a `Sequence`, so a single config dict is wrapped.
    if not isinstance(scheduler, Sequence):
        schedulers = [scheduler]
    else:
        schedulers = scheduler

    # BUGFIX: `len(None)` raised a TypeError when no dataloader was given
    # (this module's `__main__` passes None); fall back to None, which is
    # the mmengine default for `epoch_length`.
    epoch_length = len(train_dataloader) if train_dataloader is not None \
        else None

    param_schedulers = []
    # Use a fresh loop name instead of shadowing the `scheduler` parameter.
    for entry in schedulers:
        if isinstance(entry, _ParamScheduler):
            param_schedulers.append(entry)
        elif isinstance(entry, dict):
            # Deep-copy so registry building never mutates caller configs.
            _scheduler = copy.deepcopy(entry)
            param_schedulers.append(
                PARAM_SCHEDULERS.build(
                    _scheduler,
                    default_args=dict(
                        optimizer=optim_wrapper,
                        epoch_length=epoch_length)))
        else:
            raise TypeError(
                'scheduler should be a _ParamScheduler object or dict, '
                f'but got {entry}')
    return param_schedulers



if __name__ == "__main__":
    # Smoke test: build a ViT-B RVSA backbone, an AdamW optim wrapper with
    # layer-wise LR decay, then the parameter schedulers, and step one.
    args = get_parse_args()
    model = vit_b_rvsa(args)

    # Iter-based linear warmup followed by epoch-based step decay.
    param_scheduler = [
        dict(type='LinearLR', start_factor=1.0 / 3, by_epoch=False,
             begin=0, end=500),
        dict(type='MultiStepLR', begin=0, end=12, by_epoch=True,
             milestones=[8, 11], gamma=0.1),
    ]
    _check_scheduler_cfg(param_scheduler)

    optim_wrapper = dict(
        optimizer=dict(type='AdamW', lr=2e-5, betas=(0.9, 0.999),
                       weight_decay=0.05),
        constructor='LayerDecayOptimizerConstructor_ViT',
        paramwise_cfg=dict(num_layers=39,
                           layer_decay_rate=0.94,
                           depths=[5, 5, 24, 5]),
    )

    optimizer = build_optim_wrapper(model, optim_wrapper)

    scheduler = build_param_scheduler(optimizer, param_scheduler, None)
    scheduler[1].step()
