from collections import OrderedDict
from datetime import datetime
from torch import distributed
from torch.utils.tensorboard.writer import SummaryWriter
import torch
from torch import nn
from torch.nn import functional as F
import torchvision
from torchvision.transforms.functional import crop
from torchvision.utils import make_grid
import numpy as np
import seaborn as sns
from tqdm import tqdm
from PIL import Image
class SummaryWriterCount(SummaryWriter):
    """A tensorboard ``SummaryWriter`` that carries its own global-step counter.

    The counter starts at 0 and is advanced explicitly via :meth:`step`;
    :meth:`interval` / :meth:`comes_to` are convenience predicates for
    "log every N steps" / "log at step N" logic in training loops.
    """

    def __init__(self, log_dir=None, comment='', purge_step=None, max_queue=10,
                 flush_secs=120, filename_suffix=''):
        super().__init__(log_dir=log_dir, comment=comment, purge_step=purge_step,
                         max_queue=max_queue, flush_secs=flush_secs,
                         filename_suffix=filename_suffix)
        # current training step, advanced by step()
        self.global_step = 0

    def step(self, global_step=None):
        """Advance the counter by one, or jump to an explicit *global_step*."""
        self.global_step = self.global_step + 1 if global_step is None else global_step

    def interval(self, in_):
        """Return True every *in_* steps — but never at step 0."""
        return self.global_step % in_ == 0 and self.global_step != 0

    def comes_to(self, in_):
        """Return True exactly when the counter equals *in_*."""
        return self.global_step == in_

# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/utils/logger.py

import logging
import os
import sys


def setup_logger(name, save_dir, distributed_rank, filename="log.txt"):
    """Create a DEBUG-level logger; only the master process gets handlers.

    Args:
        name: logger name passed to ``logging.getLogger``.
        save_dir: if truthy, also log (at DEBUG) to ``save_dir/filename``.
        distributed_rank: rank of this process; any rank > 0 gets a bare
            logger with no handlers so non-master processes stay quiet.
        filename: log file name inside *save_dir*.

    Returns:
        logging.Logger: the configured logger.

    NOTE(review): calling this twice with the same *name* at rank 0 attaches
    duplicate handlers — callers are expected to set up each logger once.
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    # don't log results for the non-master process
    if distributed_rank > 0:
        return logger

    fmt = logging.Formatter("%(asctime)s %(name)s %(levelname)s: %(message)s")

    console = logging.StreamHandler(stream=sys.stdout)
    console.setLevel(logging.INFO)
    console.setFormatter(fmt)
    logger.addHandler(console)

    if save_dir:
        file_handler = logging.FileHandler(os.path.join(save_dir, filename))
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(fmt)
        logger.addHandler(file_handler)

    return logger


# class HiddenFeatureRecoder():
#     """
#     record the outputs of [model]'s children modules in [writer].
#     """
#     def __init__(self, model):
#         super().__init__()
#         self.model = model
#         self.hiddenFeature = dict()
#         self.handles = dict()
#         self.filter_list = [ResBlockG, ResBlockD, MaskRegressNet]

#     def hook(self):    
#         for name, layer in self.model.named_modules():
#             if any(isinstance(layer, l) for l in self.filter_list):
#                 # hook = lambda module, module_in, module_out: self.hiddenFeature.setdefault(name, module_out.clone().detach().cpu());module_out
#                 def this_hook(module, module_in, module_out, this_name=name):
#                     self.hiddenFeature.setdefault(this_name, module_out.clone().detach().cpu())
#                     return module_out
#                 self.handles[name] = layer.register_forward_hook(this_hook)
    
#     def remove(self):
#         for v in self.handles.values():
#             v.remove()

#     def write(self, writer, prefix=None, step=0):
#         prefix = '' if prefix is None else prefix+'/'
#         for k, v in self.hiddenFeature.items():
#             v = v[0].detach().cpu()
#             if v.dim()==3:
#                 v = v.unsqueeze(1)
#             writer.add_image(prefix+k, make_grid(v, nrow=8), step)
#         self.hiddenFeature = dict()


def truncted_random(num_o=8, thres=1.0, dim=128):
    """Sample a (1, num_o, dim) standard-normal latent; entries whose magnitude
    exceeds *thres* are redrawn once from a fresh standard normal.

    NOTE(review): shadowed by a later ``truncted_random(in_, *, thres)``
    definition in this module, so this version is effectively dead code.
    """
    base = torch.randn(1, num_o, dim)
    redraw = (base.abs() > thres).float()
    return redraw * torch.randn(1, num_o, dim) + (1. - redraw) * base


def para_statics(in_):
    """Summarize a tensor as plain floats: mean/std plus mean/std/max of |x|."""
    magnitude = in_.abs()
    return {
        "mean": in_.mean().item(),
        "std": in_.std().item(),
        "abs_mean": magnitude.mean().item(),
        "abs_std": magnitude.std().item(),
        "abs_max": magnitude.max().item(),
    }

def write_weights_grad(writer, model, prefix=None, step=0, scalar:bool=False):
    """
    Record the parameters and gradients of [model] in the [writer].

    Args:
        writer: a tensorboard SummaryWriter (or compatible object with
            ``add_histogram`` / ``add_scalars``).
        model (nn.Module): module whose named parameters are logged.
        prefix (str, optional): tag prefix. Weights go under
            '{prefix}_weight/', grads under '{prefix}_grad/'. With no prefix,
            weights use the bare parameter name and grads go under 'grad/'.
        step (int): global step passed to the writer.
        scalar (bool): also log para_statics() summary values as scalars.
    """
    prefix_weight = '' if prefix is None else prefix+'_weight/'
    # Fixed: default was 'grad' (no slash), which fused the tag into e.g.
    # 'gradweight' instead of the intended 'grad/weight' group.
    prefix_grad = 'grad/' if prefix is None else prefix+'_grad/'
    for name, param in model.named_parameters():
        weights = param.detach().clone().cpu()
        writer.add_histogram(f'{prefix_weight}{name}', weights, step)
        if scalar:
            writer.add_scalars(f'{prefix_weight}{name}', para_statics(weights), step)
        if param.grad is not None:
            try:
                grads = param.grad.detach().clone().cpu()
                writer.add_histogram(f'{prefix_grad}{name}', grads, step)
                if scalar:
                    writer.add_scalars(f'{prefix_grad}{name}', para_statics(grads), step)
            except AttributeError:
                pass



def num_iter(iterable_, num: int):
    """Yield items from *iterable_* repeatedly until exactly *num* items total.

    The iterable is re-iterated from the start each time it is exhausted, so it
    must be restartable (a list, Dataset, DataLoader, ...), not a one-shot
    generator.

    Args:
        iterable_ (iterable): the (restartable) source of items.
        num (int): total number of items to yield; <= 0 yields nothing.

    Examples:
        dataloader = tqdm(num_iter(dataloader, num_iter), total=num_iter)
        for iter_, data in enumerate(dataloader):
            pass
    """
    remaining = num
    while remaining > 0:
        for item in iterable_:
            yield item
            remaining -= 1
            if remaining == 0:
                return


def keyword_dict(model: torch.nn.Module, keywords: list, func = None) -> dict:
    """keyword_dict returns the mean of parameters whose name contains any of
    {keywords} in a given {model}; used for tensorboard scalar logging.

    Args:
        model (torch.nn.Module): the model to extract parameters from.
        keywords (list): substrings to match against parameter names.
        func (callable, optional): transform applied to each detached parameter
            before taking the mean. Defaults to identity.

    Returns:
        dict: {parameter_name: func(param).mean()} for matching parameters.
    """
    func = (lambda x: x) if func is None else func
    # Fixed: original tested `keywords in name`, i.e. list-in-str, which
    # raises TypeError; the intent is "name contains any keyword".
    return {
        name: func(param.detach()).mean()
        for name, param in model.named_parameters()
        if any(kw in name for kw in keywords)
    }



def onefetch(dataset, batch_size, num_workers=16, sampler=None):
    """Materialize *dataset* fully in memory as a TensorDataset.

    Iterates the dataset once with a DataLoader (no shuffling, nothing
    dropped) and concatenates each field across all batches.
    """
    loader = torch.utils.data.DataLoader(
        dataset, batch_size=batch_size, shuffle=False, drop_last=False,
        sampler=sampler, num_workers=num_workers)
    batches = list(tqdm(loader, desc='fetch all samples'))
    # transpose batches -> fields, then concatenate along the batch dim
    fields = [torch.cat(field, dim=0) for field in zip(*batches)]
    return torch.utils.data.TensorDataset(*fields)
    


def Gcounter(interval:int = 1,
             *,
             warmup:int = 100,
             warmup_interval:int = 5,
             rewarm:int = 1_000):
    """Gcounter helps decide whether to train G; D is trained more often
    during (periodically re-entered) warmup phases.

    Args:
        interval (int, optional): interval to train G after warmup. Defaults to 1.
        warmup (int, optional): length of the warmup period. Defaults to 100.
        warmup_interval (int, optional): interval to train G during warmup. Defaults to 5.
        rewarm (int, optional): counter value after which the warmup restarts. Defaults to 1_000.

    Yields:
        bool: True when G should be trained on this step.

    Examples:
        trainG = Gcounter()
        ...
        if next(trainG):
            [train G]
    """
    step = 0
    while True:
        step += 1
        # during warmup G trains less frequently (warmup_interval >= interval)
        current_interval = warmup_interval if step < warmup else interval
        yield step % current_interval == 0
        if step > rewarm:
            step = 0  # restart the warmup phase

def colormap(num_:int = 10,
             as_array: bool = False,
             as_cmap: bool = False,
             *,
             palette_type: str = 'hls'):
    """Return the project's default consistent colormap.

    Args:
        num_ (int, optional): number of colors. Defaults to 10.
        as_array (bool, optional): return a 0~255 int16 color array. Defaults to False.
        as_cmap (bool, optional): return a matplotlib Colormap. Defaults to False.
        palette_type (str, optional): 'hls' or 'husl'; unknown values fall back to 'hls'.
    """
    palettes = {'hls': sns.hls_palette, 'husl': sns.husl_palette}
    make_palette = palettes.get(palette_type, sns.hls_palette)
    if as_array:
        return (np.array(make_palette(num_, s=0.4)) * 255).astype(np.int16)
    if as_cmap:
        return make_palette(num_, s=0.4, as_cmap=as_cmap)
    return make_palette(num_, s=0.4)



def crop_images(samples, bbox, crop_size=64):
    """Crop per-object regions out of a batch of images, resized to *crop_size*.

    Args:
        samples: (b, 3, w, h) image batch.
        bbox: (b, o, 4) boxes in [0, 1] coordinates, scaled here by the image
            size and passed to torchvision's functional crop
            (assumes (top, left, height, width) order — TODO confirm callers).
        crop_size (int): output side length of every crop.

    Returns:
        Tensor of shape (b, o, 3, crop_size, crop_size).
    """
    img_size = samples.size(2)  # b 3 w h
    boxes_px = bbox.mul(img_size).long().tolist()  # b o 4, in pixels
    per_image = []
    for img, obj_boxes in zip(samples.split(1, dim=0), boxes_px):
        crops = [
            F.interpolate(
                torchvision.transforms.functional.crop(img, *box),
                size=(crop_size, crop_size), mode="bilinear", align_corners=True,
            )
            for box in obj_boxes
        ]
        per_image.append(torch.cat(crops, dim=0))  # o 3 w h
    return torch.stack(per_image, dim=0)

def truncted_random(in_, *, thres=2.0):
    """Resample entries of *in_* whose magnitude exceeds *thres*.

    Runs up to 3 rounds of redrawing from a standard normal (a redrawn value
    may itself exceed the threshold, hence the repeats). No gradients are
    tracked; the returned tensor is detached.
    """
    with torch.no_grad():
        out = in_
        for _ in range(3):
            fresh = torch.randn_like(out)
            over_thres = (out.abs() > thres).float()
            out = over_thres * fresh + (1. - over_thres) * out.detach()
    return out.detach()


def load_model(model:nn.Module, model_path: str, device) -> nn.Module:
    """Load weights from {model_path} into {model} (in place) and return it.

    Strips a leading ``module.`` (DataParallel) prefix from checkpoint keys
    and copies only entries whose names exist in the model's own state dict,
    so partial / slightly mismatched checkpoints still load.
    """
    checkpoint = torch.load(model_path, map_location=device)

    # remove the `module.` prefix added by DataParallel, if present
    stripped = OrderedDict(
        (key[7:] if key.startswith("module.") else key, value)
        for key, value in checkpoint.items()
    )

    current = model.state_dict()
    current.update({k: v for k, v in stripped.items() if k in current})
    model.load_state_dict(current)
    return model


def draw_mask(mask:np.ndarray, label:list, colormap:np.ndarray, return_Image:bool = True):
    """
    mask o * w * h
    """
    # bbmap_raw, bbmap_adjust = raw_mask.cpu().numpy(), mask.cpu().numpy()
    mask = mask.cpu().numpy()
    mask /= mask.sum(0) + 1e-8
    result = np.zeros([mask.shape[1], mask.shape[2], 3])
    for idx_, l in enumerate(label):
        if l>0:
            result += np.stack([mask[idx_]], axis=2) * np.reshape(colormap[l], [1,1,3])
    result = np.floor(result).astype(np.uint8)
    return Image.fromarray( result ) if return_Image else result



import DSmodels
class DSscore(nn.Module):
    """Accumulate perceptual-distance scores (DSmodels' LPIPS-style net-lin/alex
    loss) between generated and real image batches."""

    def __init__(self, use_gpu=True):
        super().__init__()
        net = DSmodels.PerceptualLoss(model='net-lin', net='alex', use_gpu=use_gpu, version="0.1")
        self.model = net.cuda() if use_gpu else net
        # per-batch score tensors, collected until mean_std() drains them
        self.results = []

    def forward(self, img_generated, img_real):
        """Score one batch (no gradients tracked), remember and return the result."""
        img_generated = img_generated.detach()
        img_real = img_real.type_as(img_generated)
        with torch.no_grad():
            score = self.model.forward(img_generated, img_real)
        self.results.append(score)
        return score

    def mean_std(self):
        """Return (mean, std) over all accumulated scores and reset the buffer.

        Returns (0, -1) when nothing has been accumulated.
        """
        if not self.results:
            return 0, -1
        scores = torch.cat(self.results, dim=0)
        self.results = []
        return scores.mean().item(), scores.std().item()

@torch.no_grad()
def ema_update_bn(loader, model, device=None):
    r"""Recompute the BatchNorm running_mean/running_var buffers of *model*
    with a single pass over *loader* (a tqdm-wrapped variant of
    ``torch.optim.swa_utils.update_bn``).

    Args:
        loader (torch.utils.data.DataLoader): batches to estimate activation
            statistics on. Each batch is either a tensor or a list/tuple of
            tensors, all of which are forwarded to ``model(*batch)``.
        model (torch.nn.Module): model whose BatchNorm statistics to refresh.
        device (torch.device, optional): if set, every batch tensor is moved
            to this device first.

    Example:
        >>> loader, model = ...
        >>> torch.optim.swa_utils.update_bn(loader, model)
    .. note::
        A no-op when the model contains no BatchNorm layers. The model's
        train/eval mode is restored afterwards.
    """
    # reset stats and remember each BN layer's momentum so it can be restored
    saved_momenta = {}
    for m in model.modules():
        if isinstance(m, torch.nn.modules.batchnorm._BatchNorm):
            m.running_mean = torch.zeros_like(m.running_mean)
            m.running_var = torch.ones_like(m.running_var)
            saved_momenta[m] = m.momentum

    if not saved_momenta:
        return

    was_training = model.training
    model.train()
    # momentum=None switches _BatchNorm to a cumulative moving average,
    # which needs num_batches_tracked to start from zero
    for m in saved_momenta:
        m.momentum = None
        m.num_batches_tracked *= 0

    for batch in tqdm(loader, desc='update_bn', leave=False):
        if not isinstance(batch, (list, tuple)):
            batch = [batch]
        if device is not None:
            batch = [t.to(device) for t in batch]
        model(*batch)

    # restore each layer's original momentum and the model's mode
    for m, momentum in saved_momenta.items():
        m.momentum = momentum
    model.train(was_training)
    
    
# COCO category id -> name table: "things" classes (ids 1-90, with unused gaps
# such as 12, 26, 29-30) followed by "stuff" classes (ids 92-183).
coco_cats = {1: 'person', 2: 'bicycle', 3: 'car', 4: 'motorcycle', 5: 'airplane', 6: 'bus', 7: 'train', 8: 'truck', 9: 'boat', 10: 'traffic light', 11: 'fire hydrant', 13: 'stop sign', 14: 'parking meter', 15: 'bench', 16: 'bird', 17: 'cat', 18: 'dog', 19: 'horse', 20: 'sheep', 21: 'cow', 22: 'elephant', 23: 'bear', 24: 'zebra', 25: 'giraffe', 27: 'backpack', 28: 'umbrella', 31: 'handbag', 32: 'tie', 33: 'suitcase', 34: 'frisbee', 35: 'skis', 36: 'snowboard', 37: 'sports ball', 38: 'kite', 39: 'baseball bat', 40: 'baseball glove', 41: 'skateboard', 42: 'surfboard', 43: 'tennis racket', 44: 'bottle', 46: 'wine glass', 47: 'cup', 48: 'fork', 49: 'knife', 50: 'spoon', 51: 'bowl', 52: 'banana', 53: 'apple', 54: 'sandwich', 55: 'orange', 56: 'broccoli', 57: 'carrot', 58: 'hot dog', 59: 'pizza', 60: 'donut', 61: 'cake', 62: 'chair', 63: 'couch', 64: 'potted plant', 65: 'bed', 67: 'dining table', 70: 'toilet', 72: 'tv', 73: 'laptop', 74: 'mouse', 75: 'remote', 76: 'keyboard', 77: 'cell phone', 78: 'microwave', 79: 'oven', 80: 'toaster', 81: 'sink', 82: 'refrigerator', 84: 'book', 85: 'clock', 86: 'vase', 87: 'scissors', 88: 'teddy bear', 89: 'hair drier', 90: 'toothbrush', 92: 'banner', 93: 'blanket', 94: 'branch', 95: 'bridge', 96: 'building-other', 97: 'bush', 98: 'cabinet', 99: 'cage', 100: 'cardboard', 101: 'carpet', 102: 'ceiling-other', 103: 'ceiling-tile', 104: 'cloth', 105: 'clothes', 106: 'clouds', 107: 'counter', 108: 'cupboard', 109: 'curtain', 110: 'desk-stuff', 111: 'dirt', 112: 'door-stuff', 113: 'fence', 114: 'floor-marble', 115: 'floor-other', 116: 'floor-stone', 117: 'floor-tile', 118: 'floor-wood', 119: 'flower', 120: 'fog', 121: 'food-other', 122: 'fruit', 123: 'furniture-other', 124: 'grass', 125: 'gravel', 126: 'ground-other', 127: 'hill', 128: 'house', 129: 'leaves', 130: 'light', 131: 'mat', 132: 'metal', 133: 'mirror-stuff', 134: 'moss', 135: 'mountain', 136: 'mud', 137: 'napkin', 138: 'net', 139: 'paper', 140: 'pavement', 141: 'pillow', 142: 
'plant-other', 143: 'plastic', 144: 'platform', 145: 'playingfield', 146: 'railing', 147: 'railroad', 148: 'river', 149: 'road', 150: 'rock', 151: 'roof', 152: 'rug', 153: 'salad', 154: 'sand', 155: 'sea', 156: 'shelf', 157: 'sky-other', 158: 'skyscraper', 159: 'snow', 160: 'solid-other', 161: 'stairs', 162: 'stone', 163: 'straw', 164: 'structural-other', 165: 'table', 166: 'tent', 167: 'textile-other', 168: 'towel', 169: 'tree', 170: 'vegetable', 171: 'wall-brick', 172: 'wall-concrete', 173: 'wall-other', 174: 'wall-panel', 175: 'wall-stone', 176: 'wall-tile', 177: 'wall-wood', 178: 'water-other', 179: 'waterdrops', 180: 'window-blind', 181: 'window-other', 182: 'wood', 183: 'other'}


def check_nan(info, *args, **kwargs):
    """Print a warning for every argument tensor that contains NaNs.

    Args:
        info: label included in the warning messages (e.g. a call-site name).
        *args: tensors, reported by position. Non-tensors hit the TypeError
            fallback, which just prints the value and the label.
        **kwargs: tensors, reported by keyword (no TypeError guard here).
    """
    for ii, value in enumerate(args):
        try:
            if torch.isnan(value).any():
                print(f"{info} parameter {ii} has nan")
        except TypeError:
            # not a tensor (or unsupported dtype): dump it for inspection
            print(value)
            print(info)

    for k, value in kwargs.items():
        if torch.isnan(value).any():
            print(f"{info} parameter {k} has nan")