import paddle_aux
import os
import paddle
"""
PyTorch utils
"""
import datetime
import logging
import math
import platform
import subprocess
import time
from contextlib import contextmanager
from copy import deepcopy
from pathlib import Path
try:
    import thop
except ImportError:
    thop = None
LOGGER = logging.getLogger(__name__)


@contextmanager
def torch_distributed_zero_first(local_rank: int):
    """
    Decorator to make all processes in distributed training wait for each local_master to do something.

    Non-master ranks block on a barrier before the `with` body runs; rank 0
    releases them with a second barrier after finishing its work.
    """
    if local_rank not in [-1, 0]:
        # Fix: paddle.distributed.barrier() accepts no `device_ids` argument
        # (that kwarg is torch.distributed-only); passing it raised TypeError.
        paddle.distributed.barrier()
    yield
    if local_rank == 0:
        paddle.distributed.barrier()


def date_modified(path=__file__):
    """Return the last-modified date of *path* as a 'YYYY-M-D' string."""
    mtime = Path(path).stat().st_mtime
    stamp = datetime.datetime.fromtimestamp(mtime)
    return f'{stamp.year}-{stamp.month}-{stamp.day}'


def git_describe(path=Path(__file__).parent):
    """Return a human-readable git description of the repo at *path*, or '' on failure.

    Runs `git -C <path> describe --tags --long --always` and strips the
    trailing newline. Returns the empty string when the directory is not a
    git repository or git is unavailable (callers fall back to date_modified()).
    """
    s = f'git -C {path} describe --tags --long --always'
    try:
        return subprocess.check_output(s, shell=True, stderr=subprocess.STDOUT
            ).decode()[:-1]
    except subprocess.CalledProcessError:
        # Fix: the exception was bound to an unused variable `e`.
        return ''


def select_device(device='', batch_size=None):
    """Select the compute device from a device string ('', 'cpu', '0', '0,1', ...).

    Sets CUDA_VISIBLE_DEVICES accordingly, validates GPU availability and
    (for multi-GPU) batch-size divisibility, logs a summary line, and returns
    a Paddle device string: 'gpu:0' when CUDA is usable, else 'cpu'.
    """
    s = (
        f'YOLOv5 🚀 {git_describe() or date_modified()} torch {paddle.__version__} '
        )
    device = str(device).strip().lower().replace('cuda:', '')
    cpu = device == 'cpu'
    if cpu:
        # hide all GPUs so paddle reports zero devices
        os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
    elif device:
        # restrict visible GPUs; must happen before any CUDA query below
        os.environ['CUDA_VISIBLE_DEVICES'] = device
        assert paddle.device.cuda.device_count(
            ) >= 1, f'CUDA unavailable, invalid device {device} requested'
    cuda = not cpu and paddle.device.cuda.device_count() >= 1
    if cuda:
        # when device is '', iterating the string '0' yields the single char '0'
        devices = device.split(',') if device else '0'
        n = len(devices)
        if n > 1 and batch_size:
            assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'
        space = ' ' * (len(s) + 1)
        for i, d in enumerate(devices):
            p = paddle.device.cuda.get_device_properties(device=i)
            s += (
                f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2}MB)\n"
                )
    else:
        s += 'CPU\n'
    # strip the emoji on Windows consoles that cannot encode non-ASCII
    LOGGER.info(s.encode().decode('ascii', 'ignore') if platform.system() ==
        'Windows' else s)
    return str('cuda:0' if cuda else 'cpu').replace('cuda', 'gpu')


def time_sync():
    """Return an accurate wall-clock time, synchronizing CUDA first when a GPU is present."""
    gpu_available = paddle.device.cuda.device_count() >= 1
    if gpu_available:
        # flush pending kernels so the timestamp reflects completed GPU work
        paddle.device.cuda.synchronize()
    return time.time()


def profile(input, ops, n=10, device=None):
    """Profile speed, memory, and FLOPs of op(s) over input tensor(s).

    Args:
        input: a paddle.Tensor or list of tensors fed to each op.
        ops: a layer/callable or list of them.
        n: number of timed iterations averaged per (input, op) pair.
        device: device string; defaults to select_device().

    Returns:
        List with one entry per (input, op) pair:
        [params, GFLOPs, mem_GB, fwd_ms, bwd_ms, in_shape, out_shape],
        or None for pairs that raised.
    """
    results = []
    logging.basicConfig(format='%(message)s', level=logging.INFO)
    device = device or select_device()
    print(
        f"{'Params':>12s}{'GFLOPs':>12s}{'GPU_mem (GB)':>14s}{'forward (ms)':>14s}{'backward (ms)':>14s}{'input':>24s}{'output':>24s}"
        )
    for x in (input if isinstance(input, list) else [input]):
        x = x.to(device)
        x.stop_gradient = False  # gradients required to time backward (was obfuscated `not True`)
        for m in (ops if isinstance(ops, list) else [ops]):
            m = m.to(device) if hasattr(m, 'to') else m
            # Fix: original tested `x.dtype is 'float16'` — identity comparison of a
            # dtype against a str, which can never be True, so the fp16 cast was dead.
            use_half = hasattr(m, 'half') and isinstance(x, paddle.Tensor
                ) and x.dtype == paddle.float16
            m = m.astype(dtype='float16') if use_half else m
            tf, tb, t = 0.0, 0.0, [0.0, 0.0, 0.0]  # fwd ms, bwd ms, timestamps
            try:
                # thop is optional (may be None); any failure reports 0 GFLOPs
                flops = thop.profile(m, inputs=(x,), verbose=False)[0
                    ] / 1000000000.0 * 2
            except Exception:  # was a bare except, which also swallowed KeyboardInterrupt
                flops = 0
            try:
                for _ in range(n):
                    t[0] = time_sync()
                    y = m(x)
                    t[1] = time_sync()
                    try:
                        _ = (sum([yi.sum() for yi in y]) if isinstance(y,
                            list) else y).sum().backward()
                        t[2] = time_sync()
                    except Exception:
                        t[2] = float('nan')  # op has no usable backward
                    tf += (t[1] - t[0]) * 1000 / n
                    tb += (t[2] - t[1]) * 1000 / n
                mem = paddle.device.cuda.memory_reserved(
                    ) / 1000000000.0 if paddle.device.cuda.device_count(
                    ) >= 1 else 0
                # x.shape is already a flat sequence; double tuple() was redundant
                s_in = tuple(x.shape) if isinstance(x, paddle.Tensor) else 'list'
                s_out = tuple(y.shape) if isinstance(y, paddle.Tensor) else 'list'
                p = sum(param.size for param in m.parameters()) if isinstance(
                    m, paddle.nn.Layer) else 0
                print(
                    f'{p:12}{flops:12.4g}{mem:>14.3f}{tf:14.4g}{tb:14.4g}{str(s_in):>24s}{str(s_out):>24s}'
                    )
                results.append([p, flops, mem, tf, tb, s_in, s_out])
            except Exception as e:
                print(e)
                results.append(None)
            paddle.device.cuda.empty_cache()
    return results


def is_parallel(model):
    """Return True when *model* is wrapped in paddle.DataParallel.

    Fix: the original tuple listed `paddle.DataParallel` twice — a conversion
    artifact of torch's (DataParallel, DistributedDataParallel) pair; in Paddle
    a single identity check is equivalent.
    """
    return type(model) is paddle.DataParallel


def de_parallel(model):
    # Unwrap a DataParallel-wrapped model to its underlying module; pass
    # plain models through unchanged.
    # NOTE(review): `.module` is the torch convention — paddle.DataParallel
    # exposes the wrapped network as `_layers`; confirm this attribute exists
    # at runtime. TODO
    return model.module if is_parallel(model) else model


def intersect_dicts(da, db, exclude=()):
    """Return entries of *da* whose key exists in *db* with a matching shape.

    Keys containing any substring from *exclude* are dropped. Typically used
    to intersect checkpoint state_dicts before loading.
    """
    matched = {}
    for key, value in da.items():
        if key not in db:
            continue
        if any(pattern in key for pattern in exclude):
            continue
        if tuple(value.shape) != tuple(db[key].shape):
            continue
        matched[key] = value
    return matched


def initialize_weights(model):
    """Tune layer defaults in-place: BatchNorm eps/momentum and inplace activations.

    Conv2D layers are intentionally left at framework defaults.
    """
    activation_types = (paddle.nn.Hardswish, paddle.nn.LeakyReLU,
        paddle.nn.ReLU, paddle.nn.ReLU6)
    for layer in model.sublayers():
        layer_cls = type(layer)
        if layer_cls is paddle.nn.BatchNorm2D:
            layer.eps = 0.001
            layer.momentum = 0.03
        elif layer_cls in activation_types:
            layer.inplace = True


def find_modules(model, mclass=paddle.nn.Conv2D):
    """Return the indices of layers in model.module_list that are instances of *mclass*."""
    indices = []
    for idx, layer in enumerate(model.module_list):
        if isinstance(layer, mclass):
            indices.append(idx)
    return indices


def sparsity(model):
    """Return the global fraction of zero-valued weights across all model parameters."""
    total_elems = 0.0
    zero_elems = 0.0
    for param in model.parameters():
        total_elems += param.size
        zero_elems += (param == 0).sum()
    return zero_elems / total_elems


def prune(model, amount=0.3):
    """Prune Conv2D weights to `amount` sparsity (L1-unstructured), then print the resulting global sparsity."""
    print('Pruning model... ', end='')
    for name, m in model.named_sublayers():
        if isinstance(m, paddle.nn.Conv2D):
            # NOTE(review): `paddle.nn.utils.prune` mirrors torch.nn.utils.prune —
            # confirm this submodule exists in the installed Paddle version. TODO
            paddle.nn.utils.prune.l1_unstructured(m, name='weight', amount=
                amount)
            # make pruning permanent by removing the re-parametrization hook
            paddle.nn.utils.prune.remove(m, 'weight')
    print(' %.3g global sparsity' % sparsity(model))


def fuse_conv_and_bn(conv, bn):
    """Fuse a Conv2D and its following BatchNorm2D into one Conv2D.

    Folds BN scale/shift into the conv weights and bias:
    W_fused = W_bn @ W_conv, b_fused = W_bn @ b_conv + b_bn.
    """
    # New conv with bias enabled so it can absorb the BN shift
    out_0 = paddle.nn.Conv2D(in_channels=conv.in_channels, out_channels=
        conv.out_channels, kernel_size=conv.kernel_size, stride=conv.stride,
        padding=conv.padding, groups=conv.groups, bias_attr=True)
    out_0.stop_gradient = not False  # fused conv is frozen (stop_gradient=True)
    fusedconv = out_0.to(conv.weight.place)
    # Fold BN scale into the conv weights
    w_conv = conv.weight.clone().view(conv.out_channels, -1)
    # NOTE(review): `bn.running_var` / `bn.running_mean` are torch attribute
    # names — Paddle BatchNorm layers expose `_variance` / `_mean`; confirm
    # these attributes resolve at runtime. TODO
    w_bn = paddle.diag(x=bn.weight.div(paddle.sqrt(x=bn.eps + bn.running_var)))
    paddle.assign(paddle.mm(input=w_bn, mat2=w_conv).view(tuple(fusedconv.
        weight.shape)), output=fusedconv.weight)
    # Fold BN shift into the conv bias (zero bias when the conv had none)
    b_conv = paddle.zeros(shape=conv.weight.shape[0]
        ) if conv.bias is None else conv.bias
    b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(paddle.sqrt(x=bn.
        running_var + bn.eps))
    paddle.assign(paddle.mm(input=w_bn, mat2=b_conv.reshape(-1, 1)).reshape
        (-1) + b_bn, output=fusedconv.bias)
    return fusedconv


def model_info(model, verbose=False, img_size=640):
    """Log a model summary: parameter/gradient counts and (if thop is available) GFLOPs.

    Args:
        model: a paddle.nn.Layer with .parameters()/.sublayers(); optionally
            .stride and .yaml (YOLOv5 model attributes) for the FLOPs estimate.
        verbose: when True, also print a per-parameter table.
        img_size: int or [h, w] used to scale the FLOPs estimate.
    """
    n_p = sum(x.size for x in model.parameters())  # total parameter count
    n_g = sum(x.size for x in model.parameters() if not x.stop_gradient)  # trainable
    if verbose:
        print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name',
            'gradient', 'parameters', 'shape', 'mu', 'sigma'))
        for i, (name, p) in enumerate(model.named_parameters()):
            name = name.replace('module_list.', '')
            print('%5g %40s %9s %12g %20s %10.3g %10.3g' % (i, name, not p.
                stop_gradient, p.size, list(tuple(p.shape)), p.mean(), p.std())
                )
    try:
        from thop import profile
        stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride'
            ) else 32
        # Fix: the original passed the bare (undefined) name `ch` to .get(),
        # raising NameError; the key is the string 'ch' (input channels).
        img = paddle.zeros(shape=(1, model.yaml.get('ch', 3), stride, stride))
        flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0
            ] / 1000000000.0 * 2
        img_size = img_size if isinstance(img_size, list) else [img_size,
            img_size]
        fs = ', %.1f GFLOPs' % (flops * img_size[0] / stride * img_size[1] /
            stride)
    except Exception:
        # best-effort: thop missing or profiling failed; was the redundant
        # `(ImportError, Exception)` pair
        fs = ''
    LOGGER.info(
        f'Model Summary: {len(list(model.sublayers()))} layers, {n_p} parameters, {n_g} gradients{fs}'
        )


def load_classifier(name='resnet101', n=2):
    """Load a pretrained torchvision classifier and resize its final FC layer to n classes."""
    # NOTE(review): `torchvision` is never imported in this file — this call
    # raises NameError as written; confirm the intended import/backend. TODO
    model = torchvision.models.__dict__[name](pretrained=True)
    filters = tuple(model.fc.weight.shape)[1]  # input features of the FC layer
    # Replace the FC layer's parameters with freshly zeroed ones sized for n classes.
    # NOTE(review): paddle.zeros normally takes a list shape — confirm the bare
    # int `shape=n` is accepted here. TODO
    model.fc.bias = paddle.base.framework.EagerParamBase.from_tensor(tensor
        =paddle.zeros(shape=n), trainable=True)
    model.fc.weight = paddle.base.framework.EagerParamBase.from_tensor(tensor
        =paddle.zeros(shape=[n, filters]), trainable=True)
    model.fc.out_features = n
    return model


def scale_img(img, ratio=1.0, same_shape=False, gs=32):
    """Scale an NCHW image tensor by *ratio* with bilinear interpolation.

    Unless same_shape, the result is padded up to the next multiple of *gs*
    (grid size) using the ImageNet mean value 0.447.
    """
    if ratio == 1.0:
        return img
    h, w = tuple(img.shape)[2:]
    new_size = int(h * ratio), int(w * ratio)
    img = paddle.nn.functional.interpolate(x=img, size=new_size, mode=
        'bilinear', align_corners=False)
    if not same_shape:
        # round the target canvas up to a gs-multiple
        h = math.ceil(h * ratio / gs) * gs
        w = math.ceil(w * ratio / gs) * gs
    pad_spec = [0, w - new_size[1], 0, h - new_size[0]]
    return paddle.nn.functional.pad(x=img, pad=pad_spec, value=0.447,
        pad_from_left_axis=False)


def copy_attr(a, b, include=(), exclude=()):
    """Copy attributes from *b* onto *a*.

    When *include* is non-empty, only those names are copied; names starting
    with '_' or listed in *exclude* are always skipped.
    """
    for key, value in b.__dict__.items():
        skip = (len(include) and key not in include) or key.startswith('_'
            ) or key in exclude
        if not skip:
            setattr(a, key, value)


class EarlyStopping:
    """Stop training once fitness has not improved for `patience` consecutive epochs."""

    def __init__(self, patience=30):
        self.best_fitness = 0.0  # best fitness seen so far (e.g. mAP)
        self.best_epoch = 0
        self.patience = patience or float('inf')  # patience=0 disables stopping
        self.possible_stop = False  # True one epoch before the stop threshold

    def __call__(self, epoch, fitness):
        """Record this epoch's fitness; return True when training should stop."""
        if fitness >= self.best_fitness:
            self.best_fitness = fitness
            self.best_epoch = epoch
        epochs_without_improvement = epoch - self.best_epoch
        self.possible_stop = epochs_without_improvement >= self.patience - 1
        should_stop = epochs_without_improvement >= self.patience
        if should_stop:
            LOGGER.info(
                f"""Stopping training early as no improvement observed in last {self.patience} epochs. Best results observed at epoch {self.best_epoch}, best model saved as best.pt.
To update EarlyStopping(patience={self.patience}) pass a new patience value, i.e. `python train.py --patience 300` or use `--patience 0` to disable EarlyStopping."""
                )
        return should_stop


class ModelEMA:
    """ Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models
    Keep a moving average of everything in the model state_dict (parameters and buffers).
    This is intended to allow functionality like
    https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
    A smoothed version of the weights is necessary for some training schemes to perform well.
    This class is sensitive where it is initialized in the sequence of model init,
    GPU assignment and distributed training wrappers.
    """

    def __init__(self, model, decay=0.9999, updates=0):
        # Deep-copy the de-parallelized model and keep the copy in eval mode.
        self.ema = deepcopy(model.module if is_parallel(model) else model
            ).eval()
        # Number of EMA updates applied so far (controls the decay ramp).
        self.updates = updates
        # Decay ramps from 0 toward `decay` as updates grow; softens EMA
        # during early training.
        self.decay = lambda x: decay * (1 - math.exp(-x / 2000))
        for p in self.ema.parameters():
            out_1 = p
            # Freeze EMA parameters (stop_gradient=True).
            out_1.stop_gradient = not False
            # NOTE(review): bare expression below is a no-op left by the
            # torch->paddle auto-conversion.
            out_1

    def update(self, model):
        # Move each floating-point EMA entry toward the live model's value:
        # v = d * v + (1 - d) * model_v, mutated in place on the state_dict views.
        with paddle.no_grad():
            self.updates += 1
            d = self.decay(self.updates)
            msd = model.module.state_dict() if is_parallel(model
                ) else model.state_dict()
            for k, v in self.ema.state_dict().items():
                # NOTE(review): `dtype.is_floating_point` as an attribute is the
                # torch convention — confirm paddle dtypes expose it. TODO
                if v.dtype.is_floating_point:
                    v *= d
                    v += (1.0 - d) * msd[k].detach()

    def update_attr(self, model, include=(), exclude=('process_group',
        'reducer')):
        # Copy selected (non-weight) attributes from the live model onto the EMA copy.
        copy_attr(self.ema, model, include, exclude)
