
import os
import math
from pathlib import Path
from PIL import Image
import torch
from torch.utils.data import Dataset
from torchvision import transforms
import torch
from timm.scheduler.cosine_lr import CosineLRScheduler
from timm.scheduler.step_lr import StepLRScheduler
from timm.scheduler.scheduler import Scheduler
from torch import optim as optim
class SubsetRandomSampler(torch.utils.data.Sampler):
    r"""Samples elements randomly from a given list of indices, without replacement.

    Arguments:
        indices (sequence): a sequence of indices

    Note: ``set_epoch`` only records the epoch for API compatibility with
    distributed samplers; the shuffle order does not depend on it.
    """

    def __init__(self, indices):
        self.epoch = 0
        self.indices = indices

    def __iter__(self):
        # Yield the stored indices in a fresh random order on every pass.
        permutation = torch.randperm(len(self.indices))
        for position in permutation:
            yield self.indices[position]

    def __len__(self):
        return len(self.indices)

    def set_epoch(self, epoch):
        self.epoch = epoch
        
from torch import distributed as dist
def get_dist_info():
    """Return ``(rank, world_size)``; ``(0, 1)`` when torch.distributed is unused.

    Falls back to the single-process values whenever the distributed package
    is unavailable or the default process group has not been initialized.
    """
    if not (dist.is_available() and dist.is_initialized()):
        return 0, 1
    return dist.get_rank(), dist.get_world_size()

def auto_scale_lr(cfg, logger, distributed=False):
    """Automatically scale LR according to GPU number and samples per GPU.

    Applies the linear scaling rule
    (https://arxiv.org/abs/1706.02677): the LR is multiplied by
    ``total_batch_size / base_batch_size``. Mutates ``cfg.optimizer.lr``
    in place.

    Args:
        cfg (config): Training config.
        logger (logging.Logger): Logger.
        distributed (bool): Using distributed or not.
    """
    # Get flag from config; cfg.get covers both "missing" and "falsy".
    if not cfg.get('auto_scale_lr', False):
        logger.info('Automatic scaling of learning rate (LR)'
                    ' has been disabled.')
        return

    # Get base batch size from config; without it scaling is undefined.
    base_batch_size = cfg.get('base_batch_size', None)
    if base_batch_size is None:
        return

    # Get gpu number
    if distributed:
        # fix: was len(range(world_size)), which is just world_size
        _, num_gpus = get_dist_info()
    else:
        num_gpus = len(cfg.gpu_ids)

    # calculate the total batch size across all GPUs
    samples_per_gpu = cfg.data.detection.samples_per_gpu
    batch_size = num_gpus * samples_per_gpu
    logger.info(f'Training with {num_gpus} GPU(s) with {samples_per_gpu} '
                f'samples per GPU. The total batch size is {batch_size}.')

    if batch_size != base_batch_size:
        # scale LR with
        # [linear scaling rule](https://arxiv.org/abs/1706.02677)
        scaled_lr = (batch_size / base_batch_size) * cfg.optimizer.lr
        logger.info('LR has been automatically scaled '
                    f'from {cfg.optimizer.lr} to {scaled_lr}')
        cfg.optimizer.lr = scaled_lr
    else:
        logger.info('The batch size match the '
                    f'base batch size: {base_batch_size}, '
                    f'will not scaling the LR ({cfg.optimizer.lr}).')


def build_scheduler(config, optimizer, n_iter_per_epoch=0):
    """Build a step-based (timm) LR scheduler from the training config.

    Supported ``config.lr_scheduler.name`` values: ``'cosine'``,
    ``'linear'`` and ``'step'``. Epoch-based settings in the config are
    converted to iteration counts via ``n_iter_per_epoch``.

    Args:
        config: Training config; reads ``config.train.*`` and
            ``config.lr_scheduler.*``.
        optimizer (torch.optim.Optimizer): Optimizer to schedule.
        n_iter_per_epoch (int): Number of iterations per epoch.

    Returns:
        Scheduler or None: ``None`` for an unrecognized scheduler name.
    """
    num_steps = int(config.train.epochs * n_iter_per_epoch)
    warmup_steps = int(config.train.warmup_epochs * n_iter_per_epoch)
    decay_steps = int(config.lr_scheduler.decay_epochs * n_iter_per_epoch)

    lr_scheduler = None
    if config.lr_scheduler.name == 'cosine':
        lr_scheduler = CosineLRScheduler(
            optimizer,
            t_initial=num_steps,
            lr_min=config.train.min_lr,
            warmup_lr_init=config.train.warmup_lr,
            warmup_t=warmup_steps,
            cycle_limit=1,
            t_in_epochs=False,
        )
    elif config.lr_scheduler.name == 'linear':
        lr_scheduler = LinearLRScheduler(
            optimizer,
            t_initial=num_steps,
            lr_min_rate=0.01,
            # fix: was config.train_warmup_lr (AttributeError); the warmup LR
            # lives under config.train, same as in the cosine branch.
            warmup_lr_init=config.train.warmup_lr,
            warmup_t=warmup_steps,
            t_in_epochs=False,
        )
    elif config.lr_scheduler.name == 'step':
        lr_scheduler = StepLRScheduler(
            optimizer,
            decay_t=decay_steps,
            decay_rate=0.99,
            warmup_lr_init=config.lr_scheduler.warmup_ratio,
            warmup_t=warmup_steps,
            t_in_epochs=False,
        )

    return lr_scheduler


class LinearLRScheduler(Scheduler):
    """Linear learning-rate schedule with optional linear warmup.

    During the first ``warmup_t`` steps each group's LR ramps linearly from
    ``warmup_lr_init`` up to its base value; afterwards it decays linearly
    towards ``base_lr * lr_min_rate``, reached at step ``t_initial``.
    """

    def __init__(self,
                 optimizer: torch.optim.Optimizer,
                 t_initial: int,
                 lr_min_rate: float,
                 warmup_t=0,
                 warmup_lr_init=0.,
                 t_in_epochs=True,
                 noise_range_t=None,
                 noise_pct=0.67,
                 noise_std=1.0,
                 noise_seed=42,
                 initialize=True,
                 ) -> None:
        super().__init__(
            optimizer, param_group_field="lr",
            noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed,
            initialize=initialize)

        self.t_initial = t_initial
        self.lr_min_rate = lr_min_rate
        self.warmup_t = warmup_t
        self.warmup_lr_init = warmup_lr_init
        self.t_in_epochs = t_in_epochs
        if self.warmup_t:
            # Per-group increment so each LR reaches its base value after
            # exactly warmup_t steps.
            self.warmup_steps = [
                (base - warmup_lr_init) / self.warmup_t
                for base in self.base_values
            ]
            super().update_groups(self.warmup_lr_init)
        else:
            self.warmup_steps = [1] * len(self.base_values)

    def _get_lr(self, t):
        # Warmup phase: linear ramp from warmup_lr_init.
        if t < self.warmup_t:
            return [self.warmup_lr_init + t * step for step in self.warmup_steps]
        # Decay phase: linear interpolation from base down to base * lr_min_rate.
        elapsed = t - self.warmup_t
        span = self.t_initial - self.warmup_t
        return [
            base - ((base - base * self.lr_min_rate) * (elapsed / span))
            for base in self.base_values
        ]

    def get_epoch_values(self, epoch: int):
        return self._get_lr(epoch) if self.t_in_epochs else None

    def get_update_values(self, num_updates: int):
        return None if self.t_in_epochs else self._get_lr(num_updates)


def auto_resume_helper(output_dir):
    """Find the most recently modified ``*.pth`` checkpoint in ``output_dir``.

    Args:
        output_dir (str): Directory to scan for checkpoints.

    Returns:
        str or None: Path of the newest checkpoint, or ``None`` when the
        directory does not exist or contains no ``*.pth`` files.
    """
    # Robustness fix: a missing directory simply means nothing to resume from
    # (the original os.listdir call raised FileNotFoundError here).
    if not os.path.isdir(output_dir):
        return None
    checkpoints = [ckpt for ckpt in os.listdir(output_dir) if ckpt.endswith('pth')]
    print(f"All checkpoints found in {output_dir}: {checkpoints}")
    if not checkpoints:
        return None
    # Newest by modification time wins.
    latest_checkpoint = max(
        (os.path.join(output_dir, name) for name in checkpoints),
        key=os.path.getmtime)
    print(f"The latest checkpoint found: {latest_checkpoint}")
    return latest_checkpoint

def load_checkpoint(config, model, optimizer, lr_scheduler, logger, task):
    """Resume training state from ``config.train.model_resume``.

    Model weights are always loaded (non-strict). Optimizer, LR-scheduler and
    start-epoch state are restored only when the checkpoint was produced by
    the same ``task``; otherwise those are left untouched. For ``task ==
    "cls"`` the weights are additionally loaded into ``model.backbone`` so
    backbone-only checkpoints also apply.

    Args:
        config: Training config; reads ``config.train.model_resume`` and may
            update ``config.train.start_epoch``.
        model (nn.Module): Model to load weights into.
        optimizer (torch.optim.Optimizer): Optimizer to restore.
        lr_scheduler: Scheduler to restore (must support ``load_state_dict``).
        logger (logging.Logger): Logger.
        task (str): Current task name, ``"cls"`` or ``"detection"``.

    Returns:
        float: ``max_accuracy`` stored in the checkpoint, else ``0.0``.
    """
    logger.info(f"==============> Resuming from {config.train.model_resume}....................")

    checkpoint = torch.load(config.train.model_resume, map_location='cpu')
    ckpt_task_name = None
    if 'task' in checkpoint:
        ckpt_task_name = checkpoint['task']
        logger.info(f"==============> load {ckpt_task_name} ckpt to current {task} job....................")

    # Pick the weight dict: this repo saves under 'model', mm-style
    # checkpoints use 'state_dict'.
    state_dict = None
    if 'model' in checkpoint:
        state_dict = checkpoint['model']
    elif 'state_dict' in checkpoint:
        state_dict = checkpoint['state_dict']

    # fix: msg was previously unbound (NameError) for an unknown task or a
    # checkpoint carrying neither 'model' nor 'state_dict'.
    msg = None
    if state_dict is not None:
        if task == "cls":
            msg = model.load_state_dict(state_dict, strict=False)
            # Also try the backbone so backbone-only checkpoints apply; only
            # this second message is logged, matching the original behavior.
            msg = model.backbone.load_state_dict(state_dict, strict=False)
        elif task == "detection":
            msg = model.load_state_dict(state_dict, strict=False)
    if msg is not None:
        logger.info(msg)

    max_accuracy = 0.0
    if ckpt_task_name == task:
        # Same task: optimizer/scheduler state is compatible, so resume fully.
        if 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint and 'epoch' in checkpoint:
            optimizer.load_state_dict(checkpoint['optimizer'])
            lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
            config.train.start_epoch = checkpoint['epoch'] + 1
            logger.info(f"=> loaded successfully '{config.train.model_resume}' (epoch {checkpoint['epoch']})")
    else:
        logger.info(f"==============> do not use the optimizer、lr_scheduler in ckpt, run from scratch....................")

    if 'max_accuracy' in checkpoint:
        max_accuracy = checkpoint['max_accuracy']

    # Free the (possibly large) checkpoint before training continues.
    del checkpoint
    torch.cuda.empty_cache()
    return max_accuracy

def save_checkpoint(args, epoch, model, max_accuracy, optimizer, lr_scheduler, logger, task_name = "detection"):
    """Serialize the full training state to ``args.output``.

    The file is named ``<task>_epoch_<epoch>_trainloss_<max_accuracy>.pth``
    and bundles model/optimizer/scheduler state plus bookkeeping fields.
    """
    state = {
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict(),
        'lr_scheduler': lr_scheduler.state_dict(),
        'max_accuracy': max_accuracy,
        'epoch': epoch,
        'task': task_name,
        'config': args,
    }
    save_path = os.path.join(args.output, f'{task_name}_epoch_{epoch}_trainloss_{max_accuracy}.pth')
    logger.info(f"{save_path} saving......")
    torch.save(state, save_path)
    logger.info(f"{save_path} saved !!!")

def show_result(img,
                result,
                score_thr=0.3,
                bbox_color=(72, 101, 241),
                text_color=(72, 101, 241),
                mask_color=None,
                thickness=2,
                font_size=13,
                win_name='',
                show=False,
                wait_time=0,
                out_file=None,
                classes=None):
        """Draw `result` over `img`.

        Args:
            img (str or Tensor): The image to be displayed.
            result (Tensor or tuple): The results to draw over `img`
                bbox_result or (bbox_result, segm_result).
            score_thr (float, optional): Minimum score of bboxes to be shown.
                Default: 0.3.
            bbox_color (str or tuple(int) or :obj:`Color`):Color of bbox lines.
               The tuple of color should be in BGR order. Default: 'green'
            text_color (str or tuple(int) or :obj:`Color`):Color of texts.
               The tuple of color should be in BGR order. Default: 'green'
            mask_color (None or str or tuple(int) or :obj:`Color`):
               Color of masks. The tuple of color should be in BGR order.
               Default: None
            thickness (int): Thickness of lines. Default: 2
            font_size (int): Font size of texts. Default: 13
            win_name (str): The window name. Default: ''
            wait_time (float): Value of waitKey param.
                Default: 0.
            show (bool): Whether to show the image.
                Default: False.
            out_file (str or None): The filename to write the image.
                Default: None.
            classes (list[str] or None): Class names used to label the
                drawn boxes. Default: None.

        Returns:
            img (Tensor): Only if not `show` or `out_file`
        """
        # Imported lazily so importing this module does not require mmcv.
        import mmcv
        import numpy as np
        img = mmcv.imread(img)
        # Copy so the caller's (possibly cached) image is never drawn on.
        img = img.copy()
        # A tuple result carries (bbox_result, segm_result); anything else is
        # treated as detection boxes only.
        if isinstance(result, tuple):
            bbox_result, segm_result = result
            if isinstance(segm_result, tuple):
                segm_result = segm_result[0]  # ms rcnn
        else:
            bbox_result, segm_result = result, None
        # bbox_result is a per-class list of (n_i, 5) arrays; stack them into
        # one array and build the matching per-box class-label vector.
        bboxes = np.vstack(bbox_result)
        labels = [
            np.full(bbox.shape[0], i, dtype=np.int32)
            for i, bbox in enumerate(bbox_result)
        ]
        labels = np.concatenate(labels)
        # draw segmentation masks
        segms = None
        if segm_result is not None and len(labels) > 0:  # non empty
            segms = mmcv.concat_list(segm_result)
            # Masks may be torch tensors or numpy arrays; normalize to one
            # stacked numpy array either way.
            if isinstance(segms[0], torch.Tensor):
                segms = torch.stack(segms, dim=0).detach().cpu().numpy()
            else:
                segms = np.stack(segms, axis=0)
        # if out_file specified, do not show image in window
        if out_file is not None:
            show = False
        # draw bounding boxes
        from mmdet.core.visualization.image import imshow_det_bboxes
        img = imshow_det_bboxes(
            img,
            bboxes,
            labels,
            segms,
            class_names=classes,
            score_thr=score_thr,
            bbox_color=bbox_color,
            text_color=text_color,
            mask_color=mask_color,
            thickness=thickness,
            font_size=font_size,
            win_name=win_name,
            show=show,
            wait_time=wait_time,
            out_file=out_file)

        # When the image was neither displayed nor written to disk, hand the
        # annotated image back to the caller.
        if not (show or out_file):
            return img

## the following helpers are used in cls
def check_keywords_in_name(name, keywords=()):
    """Return True if any of ``keywords`` occurs as a substring of ``name``."""
    # any() short-circuits on the first hit instead of always scanning all
    # keywords as the original flag-variable loop did.
    return any(keyword in name for keyword in keywords)

def check_keywords_in_dict(name, keywords_dict):
    """Return the value of the first entry whose key is a substring of
    ``name``, or ``None`` when no key matches (insertion order decides ties).
    """
    return next(
        (value for key, value in keywords_dict.items() if key in name),
        None)

def set_weight_decay_and_lr(
    model,
    weight_decay,
    base_lr,
    skip_list=(),
    skip_keywords=(),
    lr_layer_decay=None,
    lr_layer_decay_ratio=None,
    freeze_backbone=None,
    dcn_lr_mul=None,
    layerwise_lr=True,
):
    """Build per-parameter optimizer groups with weight-decay and LR rules.

    1-D parameters (norm/bias), names in ``skip_list`` and names matching
    ``skip_keywords`` get weight decay 0. When ``lr_layer_decay`` is truthy,
    per-layer LR ratios from ``model.lr_decay_keywards`` scale ``base_lr``,
    and DCN-specific parameters are additionally scaled by ``dcn_lr_mul``.

    Returns:
        list[dict]: One ``{'params', 'weight_decay', 'lr', 'name'}`` group
        per trainable parameter.
    """
    parameters = []
    no_decay_name = []
    lr_ratio_log = {}

    # Hoisted out of the loop (fix): the keyword->ratio table is the same for
    # every parameter, so compute it — and log — once instead of per param.
    lr_ratio_keywards = None
    if lr_layer_decay:
        print('layer-wise lr decay is used !')
        assert hasattr(model, 'lr_decay_keywards')
        lr_ratio_keywards = model.lr_decay_keywards(lr_layer_decay_ratio)

    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue  # frozen weights
        # NOTE(review): params frozen here are still added to the groups
        # below (the requires_grad check above already passed) — preserved
        # from the original; confirm this is intended.
        if freeze_backbone:
            for i in freeze_backbone:
                if f'levels.{i}' in name:
                    param.requires_grad = False
        # 1. weight decay: none for 1-D params (norm/bias) and skip entries
        if len(param.shape) == 1 or name.endswith(".bias") or (
                name in skip_list) or check_keywords_in_name(
                    name, skip_keywords):
            wd = 0.
            no_decay_name.append(name)
        else:
            wd = weight_decay
        # 2. learning rate: optional layer-wise decay + DCN multiplier
        if lr_layer_decay:
            ratio = check_keywords_in_dict(name, lr_ratio_keywards)
            lr = ratio * base_lr if ratio is not None else base_lr

            # dcn lr
            if dcn_lr_mul is not None:
                if 'offset' in name or 'attention_weights' in name or 'center_feature_scale_proj' in name or 'alpha_beta' in name:
                    lr = dcn_lr_mul * lr

            lr_ratio_log[name] = (base_lr, ratio, wd, param.requires_grad)
        else:
            lr = base_lr
        parameters.append({'params': [param], 'weight_decay': wd, 'lr': lr, 'name': name})

    # fix: original printed the literal '{no_decay_name}' (missing f-prefix)
    print(f'no decay params: {no_decay_name}')
    if layerwise_lr:
        print('lr_ratio_params:')
        for k, v in lr_ratio_log.items():
            print(k, v)

    return parameters


def build_cls_optimizer(config, model):
    """Build the classification optimizer from ``config.cls_config``.

    Weight decay of normalization/bias parameters is set to 0 via
    ``set_weight_decay_and_lr`` (driven by the model's ``no_weight_decay`` /
    ``no_weight_decay_keywords`` hooks when present).

    Args:
        config: Global config; reads ``config.cls_config.train.*`` and
            ``config.cls_config.optimizer.*``.
        model (nn.Module): Model whose parameters are optimized.

    Returns:
        torch.optim.Optimizer or None: ``None`` when ZeRO is requested
        (not implemented here) or the optimizer name is unsupported.
    """
    skip = {}
    skip_keywords = {}
    if hasattr(model, 'no_weight_decay'):
        skip = model.no_weight_decay()
    if hasattr(model, 'no_weight_decay_keywords'):
        skip_keywords = model.no_weight_decay_keywords()

    parameters = set_weight_decay_and_lr(
        model,
        config.cls_config.train.weight_decay,
        config.cls_config.train.base_lr,
        skip,
        skip_keywords,
        lr_layer_decay=config.cls_config.lr_layer_decay,
        lr_layer_decay_ratio=config.cls_config.lr_layer_decay_ratio,
        freeze_backbone=config.cls_config.optimizer.freeze_backbone,
        dcn_lr_mul=config.cls_config.optimizer.dcn_lr_mul,
    )

    opt_lower = config.cls_config.optimizer.name.lower()
    optimizer = None
    use_zero = config.cls_config.optimizer.use_zero
    if use_zero:
        # ZeroRedundancyOptimizer support is intentionally disabled; returns
        # None so a misconfigured run fails loudly at the caller.
        pass
    else:
        if opt_lower == 'sgd':
            # fix: this branch previously read legacy upper-case keys
            # (config.TRAIN.OPTIMIZER.MOMENTUM, config.TRAIN.BASE_LR, ...)
            # that do not exist in this config schema; use the same
            # cls_config fields as the adamw branch.
            optimizer = optim.SGD(parameters,
                                  momentum=config.cls_config.optimizer.momentum,
                                  nesterov=True,
                                  lr=config.cls_config.train.base_lr,
                                  weight_decay=config.cls_config.train.weight_decay)
        elif opt_lower == 'adamw':
            optimizer = optim.AdamW(parameters,
                                    eps=config.cls_config.optimizer.eps,
                                    betas=config.cls_config.optimizer.betas,
                                    lr=config.cls_config.train.base_lr,
                                    weight_decay=config.cls_config.train.weight_decay)

    return optimizer

# import torch
# from ._functions import Scatter, Gather

# def is_namedtuple(obj):
#     # Check if type was created from collections.namedtuple or a typing.NamedTuple.
#     return (
#         isinstance(obj, tuple) and hasattr(obj, "_asdict") and hasattr(obj, "_fields")
#     )


# def scatter(inputs, target_gpus, dim=0):
#     r"""
#     Slices tensors into approximately equal chunks and
#     distributes them across given GPUs. Duplicates
#     references to objects that are not tensors.
#     """
#     def scatter_map(obj):
#         if isinstance(obj, torch.Tensor):
#             return Scatter.apply(target_gpus, None, dim, obj)
#         if is_namedtuple(obj):
#             return [type(obj)(*args) for args in zip(*map(scatter_map, obj))]
#         if isinstance(obj, tuple) and len(obj) > 0:
#             return list(zip(*map(scatter_map, obj)))
#         if isinstance(obj, list) and len(obj) > 0:
#             return [list(i) for i in zip(*map(scatter_map, obj))]
#         if isinstance(obj, dict) and len(obj) > 0:
#             return [type(obj)(i) for i in zip(*map(scatter_map, obj.items()))]
#         return [obj for targets in target_gpus]

#     # After scatter_map is called, a scatter_map cell will exist. This cell
#     # has a reference to the actual function scatter_map, which has references
#     # to a closure that has a reference to the scatter_map cell (because the
#     # fn is recursive). To avoid this reference cycle, we set the function to
#     # None, clearing the cell
#     try:
#         res = scatter_map(inputs)
#     finally:
#         scatter_map = None
#     return res


# def scatter_kwargs(inputs, kwargs, target_gpus, dim=0):
#     r"""Scatter with support for kwargs dictionary"""
#     inputs = scatter(inputs, target_gpus, dim) if inputs else []
#     kwargs = scatter(kwargs, target_gpus, dim) if kwargs else []
#     if len(inputs) < len(kwargs):
#         inputs.extend(() for _ in range(len(kwargs) - len(inputs)))
#     elif len(kwargs) < len(inputs):
#         kwargs.extend({} for _ in range(len(inputs) - len(kwargs)))
#     inputs = tuple(inputs)
#     kwargs = tuple(kwargs)
#     return inputs, kwargs


# def gather(outputs, target_device, dim=0):
#     r"""
#     Gathers tensors from different GPUs on a specified device.
#     Use 'cpu' for CPU to avoid a deprecation warning.
#     """
#     def gather_map(outputs):
#         out = outputs[0]
#         if isinstance(out, torch.Tensor):
#             return Gather.apply(target_device, dim, *outputs)
#         if out is None:
#             return None
#         if isinstance(out, dict):
#             if not all(len(out) == len(d) for d in outputs):
#                 raise ValueError('All dicts must have the same number of keys')
#             return type(out)((k, gather_map([d[k] for d in outputs]))
#                              for k in out)
#         if is_namedtuple(out):
#             return type(out)._make(map(gather_map, zip(*outputs)))
#         return type(out)(map(gather_map, zip(*outputs)))

#     # Recursive function calls like this create reference cycles.
#     # Setting the function to None clears the refcycle.
#     try:
#         res = gather_map(outputs)
#     finally:
#         gather_map = None
#     return res
