from utils import paddle_aux
import paddle
"""
Common modules
"""
import json
import math
import platform
import warnings
from collections import OrderedDict, namedtuple
from copy import copy
from pathlib import Path
import cv2
import numpy as np
import pandas as pd
import requests
import yaml
from PIL import Image
from models.attention.cbam import ChannelAttention, SpatialAttention
from models.attention.se import SELayer
from utils.activations import h_swish, h_sigmoid
from utils.datasets import exif_transpose, letterbox
from utils.general import LOGGER, check_requirements, check_suffix, check_version, colorstr, increment_path, make_divisible, non_max_suppression, scale_coords, xywh2xyxy, xyxy2xywh
from utils.plots import Annotator, colors, save_one_box
from utils.torch_utils import copy_attr, time_sync


def autopad(k, p=None):
    """Return 'same' padding for kernel size k when p is not given explicitly."""
    if p is not None:
        return p
    # Half the kernel per side; per-dimension when k is a list/tuple.
    return k // 2 if isinstance(k, int) else [x // 2 for x in k]


class Conv(paddle.nn.Layer):
    """Standard convolution block: Conv2D -> BatchNorm2D -> activation."""

    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):
        super().__init__()
        self.conv = paddle.nn.Conv2D(
            in_channels=c1,
            out_channels=c2,
            kernel_size=k,
            stride=s,
            padding=autopad(k, p),
            groups=g,
            bias_attr=False,
        )
        self.bn = paddle.nn.BatchNorm2D(num_features=c2)
        # act=True -> SiLU; a Layer instance is used as-is; anything else -> no-op.
        if act is True:
            self.act = paddle.nn.Silu()
        elif isinstance(act, paddle.nn.Layer):
            self.act = act
        else:
            self.act = paddle.nn.Identity()

    def forward(self, x):
        """Full path: conv -> batch norm -> activation."""
        return self.act(self.bn(self.conv(x)))

    def forward_fuse(self, x):
        """Fused path (BN already folded into conv weights): conv -> activation."""
        return self.act(self.conv(x))


class DWConv(Conv):
    """Depth-wise convolution: a Conv whose group count is gcd(c1, c2)."""

    def __init__(self, c1, c2, k=1, s=1, act=True):
        groups = math.gcd(c1, c2)
        super().__init__(c1, c2, k, s, g=groups, act=act)


class PW_Conv(paddle.nn.Layer):
    """Point-wise (1x1) convolution block: Conv2D -> BatchNorm2D -> ReLU6."""

    def __init__(self, c1, c2):
        super().__init__()
        self.conv = paddle.nn.Conv2D(
            in_channels=c1, out_channels=c2, kernel_size=1,
            stride=1, padding=0, bias_attr=False)
        self.bn = paddle.nn.BatchNorm2D(num_features=c2)
        self.act = paddle.nn.ReLU6()

    def forward(self, x):
        """Apply conv, batch norm, then ReLU6."""
        return self.act(self.bn(self.conv(x)))


class TransformerLayer(paddle.nn.Layer):
    """Single transformer encoder layer (LayerNorm deliberately omitted, as in YOLOv5)."""

    def __init__(self, c, num_heads):
        """c: embedding width; num_heads: attention head count (must divide c)."""
        super().__init__()
        self.q = paddle.nn.Linear(in_features=c, out_features=c, bias_attr=False)
        self.k = paddle.nn.Linear(in_features=c, out_features=c, bias_attr=False)
        self.v = paddle.nn.Linear(in_features=c, out_features=c, bias_attr=False)
        # Paddle's class is MultiHeadAttention (capital H); the original
        # MultiheadAttention is the torch name and does not exist in paddle.nn.
        self.ma = paddle.nn.MultiHeadAttention(embed_dim=c, num_heads=num_heads)
        self.fc1 = paddle.nn.Linear(in_features=c, out_features=c, bias_attr=False)
        self.fc2 = paddle.nn.Linear(in_features=c, out_features=c, bias_attr=False)

    def forward(self, x):
        """Self-attention + feed-forward, each with a residual connection."""
        # Paddle's MultiHeadAttention returns the output tensor directly (no
        # (output, weights) tuple as in torch), so the torch-style [0] index
        # would wrongly slice the first element of the output.
        # NOTE(review): paddle expects [batch, seq, dim] input while the
        # caller (TransformerBlock) builds torch-layout [seq, batch, dim] —
        # confirm the intended layout against training results.
        x = self.ma(self.q(x), self.k(x), self.v(x)) + x
        x = self.fc2(self.fc1(x)) + x
        return x


class TransformerBlock(paddle.nn.Layer):
    """Vision-transformer block: optional channel-matching conv, learned positional
    embedding (a Linear layer), then num_layers stacked TransformerLayers."""

    def __init__(self, c1, c2, num_heads, num_layers):
        super().__init__()
        self.conv = None
        if c1 != c2:
            self.conv = Conv(c1, c2)  # match channels before the transformer
        self.linear = paddle.nn.Linear(in_features=c2, out_features=c2)  # positional embedding
        self.tr = paddle.nn.Sequential(*(TransformerLayer(c2, num_heads) for
            _ in range(num_layers)))
        self.c2 = c2

    def forward(self, x):
        """Flatten (b,c,h,w) to a token sequence, run transformer layers, restore shape."""
        if self.conv is not None:
            x = self.conv(x)
        b, _, w, h = tuple(x.shape)
        # (b, c, w*h) -> (w*h, b, c): one token per spatial position.
        p = x.flatten(start_axis=2).transpose(perm=[2, 0, 1])
        # paddle.Tensor.reshape takes the target shape as a single list,
        # not torch-style varargs.
        return self.tr(p + self.linear(p)).transpose(perm=[1, 2, 0]).reshape(
            [b, self.c2, w, h])


class Bottleneck(paddle.nn.Layer):
    """Standard bottleneck: 1x1 reduce -> 3x3 conv, with optional residual add."""

    def __init__(self, c1, c2, shortcut=True, g=1, e=0.5):
        super().__init__()
        hidden = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, hidden, 1, 1)
        self.cv2 = Conv(hidden, c2, 3, 1, g=g)
        # Residual only when requested and the shapes match.
        self.add = shortcut and c1 == c2

    def forward(self, x):
        y = self.cv2(self.cv1(x))
        return x + y if self.add else y


class BottleneckCSP(paddle.nn.Layer):
    """CSP bottleneck, https://github.com/WongKinYiu/CrossStagePartialNetworks."""

    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
        super().__init__()
        hidden = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, hidden, 1, 1)
        self.cv2 = paddle.nn.Conv2D(in_channels=c1, out_channels=hidden,
            kernel_size=1, stride=1, bias_attr=False)
        self.cv3 = paddle.nn.Conv2D(in_channels=hidden, out_channels=hidden,
            kernel_size=1, stride=1, bias_attr=False)
        self.cv4 = Conv(2 * hidden, c2, 1, 1)
        self.bn = paddle.nn.BatchNorm2D(num_features=2 * hidden)  # on concat
        self.act = paddle.nn.Silu()
        self.m = paddle.nn.Sequential(*(Bottleneck(hidden, hidden, shortcut,
            g, e=1.0) for _ in range(n)))

    def forward(self, x):
        """Main (bottleneck stack) and skip branches, concatenated then fused."""
        branch_main = self.cv3(self.m(self.cv1(x)))
        branch_skip = self.cv2(x)
        merged = paddle.concat(x=(branch_main, branch_skip), axis=1)
        return self.cv4(self.act(self.bn(merged)))


class C3(paddle.nn.Layer):
    """CSP bottleneck with 3 convolutions."""

    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
        super().__init__()
        hidden = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, hidden, 1, 1)
        self.cv2 = Conv(c1, hidden, 1, 1)
        self.cv3 = Conv(2 * hidden, c2, 1)
        self.m = paddle.nn.Sequential(*(Bottleneck(hidden, hidden, shortcut,
            g, e=1.0) for _ in range(n)))

    def forward(self, x):
        """Bottleneck branch and skip branch, concatenated then projected."""
        main = self.m(self.cv1(x))
        skip = self.cv2(x)
        return self.cv3(paddle.concat(x=(main, skip), axis=1))


class C3TR(C3):
    """C3 variant whose bottleneck stack is replaced by a TransformerBlock."""

    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
        super().__init__(c1, c2, n, shortcut, g, e)
        hidden = int(c2 * e)
        self.m = TransformerBlock(hidden, hidden, 4, n)


class C3SPP(C3):
    """C3 variant whose bottleneck stack is replaced by an SPP layer."""

    def __init__(self, c1, c2, k=(5, 9, 13), n=1, shortcut=True, g=1, e=0.5):
        super().__init__(c1, c2, n, shortcut, g, e)
        hidden = int(c2 * e)
        self.m = SPP(hidden, hidden, k)


class C3Ghost(C3):
    """C3 variant built from GhostBottleneck units."""

    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
        super().__init__(c1, c2, n, shortcut, g, e)
        hidden = int(c2 * e)
        blocks = [GhostBottleneck(hidden, hidden) for _ in range(n)]
        self.m = paddle.nn.Sequential(*blocks)


class CBAMC3(paddle.nn.Layer):
    """C3 block followed by CBAM attention (channel attention, then spatial)."""

    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
        super().__init__()
        hidden = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, hidden, 1, 1)
        self.cv2 = Conv(c1, hidden, 1, 1)
        self.cv3 = Conv(2 * hidden, c2, 1)
        self.m = paddle.nn.Sequential(*[Bottleneck(hidden, hidden, shortcut,
            g, e=1.0) for _ in range(n)])
        self.channel_attention = ChannelAttention(c2, 16)
        self.spatial_attention = SpatialAttention()

    def forward(self, x):
        """C3-style fusion, then channel attention, then spatial attention."""
        fused = self.cv3(paddle.concat(
            x=(self.m(self.cv1(x)), self.cv2(x)), axis=1))
        return self.spatial_attention(self.channel_attention(fused))


class SPP(paddle.nn.Layer):
    """Spatial Pyramid Pooling layer, https://arxiv.org/abs/1406.4729."""

    def __init__(self, c1, c2, k=(5, 9, 13)):
        super().__init__()
        hidden = c1 // 2  # hidden channels
        self.cv1 = Conv(c1, hidden, 1, 1)
        self.cv2 = Conv(hidden * (len(k) + 1), c2, 1, 1)
        pools = [paddle.nn.MaxPool2D(kernel_size=ks, stride=1, padding=ks // 2)
                 for ks in k]
        self.m = paddle.nn.LayerList(sublayers=pools)

    def forward(self, x):
        """Reduce channels, pool at each kernel size, concat, project."""
        x = self.cv1(x)
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')  # suppress max-pool padding warnings
            feats = [x] + [pool(x) for pool in self.m]
            return self.cv2(paddle.concat(x=feats, axis=1))


class SPPF(paddle.nn.Layer):
    """SPP-Fast: three chained k-pools are equivalent to SPP with k=(5, 9, 13)."""

    def __init__(self, c1, c2, k=5):
        super().__init__()
        hidden = c1 // 2  # hidden channels
        self.cv1 = Conv(c1, hidden, 1, 1)
        self.cv2 = Conv(hidden * 4, c2, 1, 1)
        self.m = paddle.nn.MaxPool2D(kernel_size=k, stride=1, padding=k // 2)

    def forward(self, x):
        """Reduce channels, pool three times in sequence, concat, project."""
        x = self.cv1(x)
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')  # suppress max-pool padding warnings
            y1 = self.m(x)
            y2 = self.m(y1)
            y3 = self.m(y2)
            return self.cv2(paddle.concat(x=[x, y1, y2, y3], axis=1))


class Focus(paddle.nn.Layer):
    """Focus layer: move 2x2 spatial patches into channels, then convolve.
    (b, c, h, w) -> (b, 4c, h/2, w/2) -> conv."""

    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):
        super().__init__()
        self.conv = Conv(c1 * 4, c2, k, s, p, g, act)

    def forward(self, x):
        # Four pixel-phase slices stacked along the channel axis.
        slices = [x[..., ::2, ::2], x[..., 1::2, ::2],
                  x[..., ::2, 1::2], x[..., 1::2, 1::2]]
        return self.conv(paddle.concat(x=slices, axis=1))


class GhostConv(paddle.nn.Layer):
    """Ghost convolution: a primary conv plus a cheap depth-wise conv, concatenated.
    https://github.com/huawei-noah/ghostnet"""

    def __init__(self, c1, c2, k=1, s=1, g=1, act=True):
        super().__init__()
        hidden = c2 // 2  # each branch produces half the output channels
        self.cv1 = Conv(c1, hidden, k, s, None, g, act)  # primary features
        self.cv2 = Conv(hidden, hidden, 5, 1, None, hidden, act)  # cheap "ghost" features

    def forward(self, x):
        primary = self.cv1(x)
        return paddle.concat(x=[primary, self.cv2(primary)], axis=1)


class GhostBottleneck(paddle.nn.Layer):
    """Ghost bottleneck: ghost-expand, optional depth-wise stride, ghost-project."""

    def __init__(self, c1, c2, k=3, s=1):
        super().__init__()
        hidden = c2 // 2  # hidden channels
        downsample = s == 2
        mid = DWConv(hidden, hidden, k, s, act=False) if downsample \
            else paddle.nn.Identity()
        self.conv = paddle.nn.Sequential(
            GhostConv(c1, hidden, 1, 1),            # point-wise expand
            mid,                                    # depth-wise stride (s == 2 only)
            GhostConv(hidden, c2, 1, 1, act=False)) # point-wise linear project
        if downsample:
            self.shortcut = paddle.nn.Sequential(
                DWConv(c1, c1, k, s, act=False), Conv(c1, c2, 1, 1, act=False))
        else:
            self.shortcut = paddle.nn.Identity()

    def forward(self, x):
        return self.conv(x) + self.shortcut(x)


class Contract(paddle.nn.Layer):
    """Contract spatial dims into channels: (b, c, h, w) -> (b, c*gain^2, h/gain, w/gain)."""

    def __init__(self, gain=2):
        super().__init__()
        self.gain = gain

    def forward(self, x):
        b, c, h, w = tuple(x.shape)
        s = self.gain
        # paddle.Tensor has no torch-style .view(*shape)/.contiguous(); use
        # reshape with a shape list (paddle.transpose returns a tensor that
        # reshape can consume directly).
        x = x.reshape([b, c, h // s, s, w // s, s])
        x = x.transpose(perm=[0, 3, 5, 1, 2, 4])
        return x.reshape([b, c * s * s, h // s, w // s])


class Expand(paddle.nn.Layer):
    """Expand channels into spatial dims: (b, c, h, w) -> (b, c/gain^2, h*gain, w*gain)."""

    def __init__(self, gain=2):
        super().__init__()
        self.gain = gain

    def forward(self, x):
        b, c, h, w = tuple(x.shape)
        s = self.gain
        # paddle.Tensor has no torch-style .view(*shape)/.contiguous(); use
        # reshape with a shape list (paddle.transpose returns a tensor that
        # reshape can consume directly).
        x = x.reshape([b, s, s, c // s ** 2, h, w])
        x = x.transpose(perm=[0, 3, 4, 1, 5, 2])
        return x.reshape([b, c // s ** 2, h * s, w * s])


class Concat(paddle.nn.Layer):
    """Concatenate a list of tensors along a fixed axis."""

    def __init__(self, dimension=1):
        super().__init__()
        self.d = dimension  # concat axis

    def forward(self, x):
        """x: list/tuple of tensors to join."""
        return paddle.concat(x=x, axis=self.d)


class DetectMultiBackend(paddle.nn.Layer):
    """YOLOv5 multi-backend inference wrapper.

    Chooses a backend from the weight file's suffix (native Paddle, jit,
    ONNX via Runtime or OpenCV DNN, OpenVINO, TensorRT, CoreML, TensorFlow
    SavedModel/GraphDef/TFLite/Edge TPU) and exposes one forward() API.
    """

    def __init__(self, weights='yolov5s.pt', device=None, dnn=False, data=None
        ):
        """Load `weights` and initialise the backend inferred from its suffix.

        weights: model path (or single-element list of paths).
        device:  device handle forwarded to backends that honour it.
        dnn:     prefer OpenCV DNN over ONNX Runtime for .onnx weights.
        data:    optional dataset YAML supplying class names.
        """
        from models.experimental import attempt_download, attempt_load
        super().__init__()
        w = str(weights[0] if isinstance(weights, list) else weights)
        (pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite,
            edgetpu, tfjs) = self.model_type(w)
        stride, names = 64, [f'class{i}' for i in range(1000)]  # defaults
        w = attempt_download(w)  # download if w is a URL
        if data:  # class names from a dataset YAML, when given
            with open(data, errors='ignore') as f:
                names = yaml.safe_load(f)['names']
        if pt:  # native checkpoint
            model = attempt_load(weights if isinstance(weights, list) else
                w, map_location=device)
            stride = max(int(model.stride.max()), 32)
            names = model.module.names if hasattr(model, 'module'
                ) else model.names
            self.model = model  # explicit attribute so to()/eval() reach it
        elif jit:  # jit-compiled archive
            LOGGER.info(f'Loading {w} for TorchScript inference...')
            extra_files = {'config.txt': ''}  # metadata side-channel
            model = paddle.jit.load(w, _extra_files=extra_files)
            if extra_files['config.txt']:
                d = json.loads(extra_files['config.txt'])
                stride, names = int(d['stride']), d['names']
        elif dnn:  # ONNX via OpenCV DNN
            LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...')
            check_requirements(('opencv-python>=4.5.4',))
            net = cv2.dnn.readNetFromONNX(w)
        elif onnx:  # ONNX Runtime
            LOGGER.info(f'Loading {w} for ONNX Runtime inference...')
            cuda = paddle.device.cuda.device_count() >= 1
            check_requirements(('onnx', 'onnxruntime-gpu' if cuda else
                'onnxruntime'))
            import onnxruntime
            providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'
                ] if cuda else ['CPUExecutionProvider']
            session = onnxruntime.InferenceSession(w, providers=providers)
        elif xml:  # OpenVINO
            LOGGER.info(f'Loading {w} for OpenVINO inference...')
            check_requirements(('openvino-dev',))
            import openvino.inference_engine as ie
            core = ie.IECore()
            if not Path(w).is_file():  # a directory was given: find the .xml
                w = next(Path(w).glob('*.xml'))
            network = core.read_network(model=w, weights=Path(w).
                with_suffix('.bin'))
            executable_network = core.load_network(network, device_name=
                'CPU', num_requests=1)
        elif engine:  # TensorRT
            LOGGER.info(f'Loading {w} for TensorRT inference...')
            import tensorrt as trt
            check_version(trt.__version__, '7.0.0', hard=True)
            Binding = namedtuple('Binding', ('name', 'dtype', 'shape',
                'data', 'ptr'))
            trt_fp16_input = False
            logger = trt.Logger(trt.Logger.INFO)
            with open(w, 'rb') as f, trt.Runtime(logger) as runtime:
                model = runtime.deserialize_cuda_engine(f.read())
            bindings = OrderedDict()
            for index in range(model.num_bindings):
                name = model.get_binding_name(index)
                dtype = trt.nptype(model.get_binding_dtype(index))
                shape = tuple(model.get_binding_shape(index))
                data = paddle.to_tensor(data=np.empty(shape, dtype=np.dtype
                    (dtype))).to(device)
                # NOTE(review): data_ptr() is a torch-ism left by the
                # converter — confirm it exists on the installed paddle build.
                bindings[name] = Binding(name, dtype, shape, data, int(data
                    .data_ptr()))
                if model.binding_is_input(index) and dtype == np.float16:
                    trt_fp16_input = True  # engine expects fp16 input
            binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items()
                )
            context = model.create_execution_context()
            batch_size = tuple(bindings['images'].shape)[0]
        elif coreml:  # CoreML
            LOGGER.info(f'Loading {w} for CoreML inference...')
            import coremltools as ct
            model = ct.models.MLModel(w)
        elif saved_model:  # TensorFlow SavedModel
            LOGGER.info(f'Loading {w} for TensorFlow SavedModel inference...')
            import tensorflow as tf
            keras = False  # plain SavedModel, not a Keras archive
            model = tf.keras.models.load_model(w
                ) if keras else tf.saved_model.load(w)
        elif pb:  # TensorFlow frozen GraphDef
            LOGGER.info(f'Loading {w} for TensorFlow GraphDef inference...')
            import tensorflow as tf

            def wrap_frozen_graph(gd, inputs, outputs):
                """Wrap a frozen GraphDef into a callable pruned to inputs/outputs."""
                x = tf.compat.v1.wrap_function(lambda : tf.compat.v1.
                    import_graph_def(gd, name=''), [])
                ge = x.graph.as_graph_element
                return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.
                    map_structure(ge, outputs))
            gd = tf.Graph().as_graph_def()
            gd.ParseFromString(open(w, 'rb').read())
            frozen_func = wrap_frozen_graph(gd, inputs='x:0', outputs=
                'Identity:0')
        elif tflite or edgetpu:  # TFLite / Edge TPU
            try:  # prefer the lightweight tflite_runtime when available
                from tflite_runtime.interpreter import Interpreter, load_delegate
            except ImportError:
                import tensorflow as tf
                Interpreter, load_delegate = (tf.lite.Interpreter, tf.lite.
                    experimental.load_delegate)
            if edgetpu:
                LOGGER.info(
                    f'Loading {w} for TensorFlow Lite Edge TPU inference...')
                delegate = {'Linux': 'libedgetpu.so.1', 'Darwin':
                    'libedgetpu.1.dylib', 'Windows': 'edgetpu.dll'}[platform
                    .system()]
                interpreter = Interpreter(model_path=w,
                    experimental_delegates=[load_delegate(delegate)])
            else:
                LOGGER.info(f'Loading {w} for TensorFlow Lite inference...')
                interpreter = Interpreter(model_path=w)
            interpreter.allocate_tensors()
            input_details = interpreter.get_input_details()
            output_details = interpreter.get_output_details()
        elif tfjs:
            raise Exception('ERROR: YOLOv5 TF.js inference is not supported')
        # Expose every local (model, session, stride, names, flags, ...) as an
        # instance attribute so forward()/warmup() can reach them.
        self.__dict__.update(locals())

    def forward(self, im, augment=False, visualize=False, val=False):
        """Run inference on a BCHW image tensor with the active backend.

        Returns predictions y, or the tuple (y, []) when val=True.
        """
        b, ch, h, w = tuple(im.shape)
        if self.pt or self.jit:  # native / jit model
            y = self.model(im) if self.jit else self.model(im, augment=
                augment, visualize=visualize)
            return y if val else y[0]
        elif self.dnn:  # ONNX via OpenCV DNN
            im = im.cpu().numpy()
            self.net.setInput(im)
            y = self.net.forward()
        elif self.onnx:  # ONNX Runtime
            im = im.cpu().numpy()
            y = self.session.run([self.session.get_outputs()[0].name], {
                self.session.get_inputs()[0].name: im})[0]
        elif self.xml:  # OpenVINO
            im = im.cpu().numpy()
            desc = self.ie.TensorDesc(precision='FP32', dims=tuple(im.shape
                ), layout='NCHW')
            request = self.executable_network.requests[0]
            request.set_blob(blob_name='images', blob=self.ie.Blob(desc, im))
            request.infer()
            y = request.output_blobs['output'].buffer
        elif self.engine:  # TensorRT
            assert tuple(im.shape) == tuple(self.bindings['images'].shape), (
                tuple(im.shape), tuple(self.bindings['images'].shape))
            # NOTE(review): data_ptr() is a torch-ism left by the converter —
            # confirm it exists on the installed paddle build.
            self.binding_addrs['images'] = int(im.data_ptr())
            self.context.execute_v2(list(self.binding_addrs.values()))
            y = self.bindings['output'].data
        elif self.coreml:  # CoreML
            im = im.transpose(perm=[0, 2, 3, 1]).cpu().numpy()  # BCHW -> BHWC
            im = Image.fromarray((im[0] * 255).astype('uint8'))
            y = self.model.predict({'image': im})
            if 'confidence' in y:
                box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]])  # to pixels
                # np.float was removed in NumPy 1.24; the builtin float is the
                # documented replacement (same float64 semantics).
                conf, cls = y['confidence'].max(1), y['confidence'].argmax(
                    axis=1).astype(float)
                y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-
                    1, 1)), 1)
            else:  # model outputs raw var_NNN tensors: pick the latest one
                k = 'var_' + str(sorted(int(k.replace('var_', '')) for k in
                    y)[-1])
                y = y[k]
        else:  # TensorFlow family
            im = im.transpose(perm=[0, 2, 3, 1]).cpu().numpy()  # BCHW -> BHWC
            if self.saved_model:
                y = (self.model(im, training=False) if self.keras else self
                    .model(im)[0]).numpy()
            elif self.pb:
                y = self.frozen_func(x=self.tf.constant(im)).numpy()
            else:  # TFLite / Edge TPU
                input, output = self.input_details[0], self.output_details[0]
                int8 = input['dtype'] == np.uint8  # quantized model?
                if int8:
                    scale, zero_point = input['quantization']
                    im = (im / scale + zero_point).astype(np.uint8)  # de-scale
                self.interpreter.set_tensor(input['index'], im)
                self.interpreter.invoke()
                y = self.interpreter.get_tensor(output['index'])
                if int8:
                    scale, zero_point = output['quantization']
                    y = (y.astype(np.float32) - zero_point) * scale  # re-scale
            y[..., :4] *= [w, h, w, h]  # normalized xywh -> pixels
        y = paddle.to_tensor(data=y) if isinstance(y, np.ndarray) else y
        return (y, []) if val else y

    def warmup(self, imgsz=(1, 3, 640, 640), half=False):
        """Run one dummy forward pass to warm up the model on non-CPU devices."""
        if self.pt or self.jit or self.onnx or self.engine:
            d = self.device
            # Paddle Place objects and plain strings have no torch-style
            # `.type` attribute (the original `self.device.type` raised
            # AttributeError); detect CPU explicitly instead.
            is_cpu = isinstance(d, paddle.CPUPlace) or (isinstance(d, str) and
                'cpu' in d.lower())
            if isinstance(d, (paddle.CPUPlace, paddle.CUDAPlace, str)
                ) and not is_cpu:
                im = paddle.zeros(shape=imgsz).to(d).astype(
                    'float16' if half else 'float32')
                self.forward(im)

    @staticmethod
    def model_type(p='path/to/model.pt'):
        """Return one boolean per supported backend, inferred from path p."""
        from export import export_formats
        suffixes = list(export_formats().Suffix) + ['.xml']  # export suffixes
        check_suffix(p, suffixes)
        p = Path(p).name
        (pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite,
            edgetpu, tfjs, xml2) = (s in p for s in suffixes)
        xml |= xml2  # *_openvino_model directory or bare .xml file
        tflite &= not edgetpu  # a .tflite file that is not an Edge TPU model
        return (pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite,
            edgetpu, tfjs)


class AutoShape(paddle.nn.Layer):
    """Input-robust model wrapper: accepts file paths/URLs, PIL images, numpy
    arrays or paddle tensors, runs pre-processing, inference and NMS, and
    returns a Detections object."""
    # Inference / NMS settings (class-level defaults, overridable per instance).
    conf = 0.25
    iou = 0.45
    agnostic = False
    multi_label = False
    classes = None
    max_det = 1000
    amp = False

    def __init__(self, model):
        """Wrap `model` (a raw model or a DetectMultiBackend) for flexible input."""
        super().__init__()
        LOGGER.info('Adding AutoShape... ')
        # Mirror selected attributes of the wrapped model onto this wrapper.
        copy_attr(self, model, include=('yaml', 'nc', 'hyp', 'names',
            'stride', 'abc'), exclude=())
        self.dmb = isinstance(model, DetectMultiBackend)  # backend wrapper?
        self.pt = not self.dmb or model.pt  # native weights?
        self.model = model.eval()

    def _apply(self, fn):
        """Apply fn (device/dtype casts) to parameters plus the detection
        head's stride/grid/anchor_grid buffers, which _apply would miss."""
        self = super()._apply(fn)
        if self.pt:
            # Detect() head is the last module of the wrapped model.
            m = self.model.model.model[-1] if self.dmb else self.model.model[-1
                ]
            m.stride = fn(m.stride)
            m.grid = list(map(fn, m.grid))
            if isinstance(m.anchor_grid, list):
                m.anchor_grid = list(map(fn, m.anchor_grid))
        return self

    @paddle.no_grad()
    def forward(self, imgs, size=640, augment=False, profile=False):
        """Pre-process -> inference -> NMS for one image or a list of images.

        imgs: path/URL strings, PIL images, numpy arrays (HWC or CHW) or a
        ready BCHW paddle tensor; size: target long side for letterboxing.
        """
        t = [time_sync()]  # timestamps: start, pre-processed, inferred, NMS'd
        p = next(self.model.parameters()) if self.pt else paddle.zeros(shape
            =[1])
        # NOTE(review): paddle tensors expose `.place`, not torch-style
        # `.device.type` — confirm this attribute access on a paddle build.
        autocast = self.amp and p.device.type != 'cpu'
        if isinstance(imgs, paddle.Tensor):
            # Already a tensor: skip all pre-processing.
            with paddle.amp.auto_cast(enable=autocast):
                return self.model(imgs.to(p.place).astype(dtype=p.dtype),
                    augment, profile)
        n, imgs = (len(imgs), imgs) if isinstance(imgs, list) else (1, [imgs])
        shape0, shape1, files = [], [], []  # original shapes, target shapes, filenames
        for i, im in enumerate(imgs):
            f = f'image{i}'  # fallback filename
            if isinstance(im, (str, Path)):
                # Path or URL: open (downloading if http), remember the name.
                im, f = Image.open(requests.get(im, stream=True).raw if str
                    (im).startswith('http') else im), im
                im = np.asarray(exif_transpose(im))
            elif isinstance(im, Image.Image):
                im, f = np.asarray(exif_transpose(im)), getattr(im,
                    'filename', f) or f
            files.append(Path(f).with_suffix('.jpg').name)
            if tuple(im.shape)[0] < 5:
                # Looks channel-first (CHW): convert to HWC.
                im = im.transpose((1, 2, 0))
            # Drop alpha channel / tile grayscale up to 3 channels.
            im = im[..., :3] if im.ndim == 3 else np.tile(im[..., None], 3)
            s = tuple(im.shape)[:2]  # (h, w)
            shape0.append(s)
            g = size / max(s)  # scale factor to the requested size
            shape1.append([(y * g) for y in s])
            imgs[i] = im if im.data.contiguous else np.ascontiguousarray(im)
        # One common padded inference shape, rounded to the model stride.
        shape1 = [make_divisible(x, self.stride) for x in np.stack(shape1, 
            0).max(0)]
        x = [letterbox(im, new_shape=shape1 if self.pt else size, auto=
            False)[0] for im in imgs]
        x = np.stack(x, 0) if n > 1 else x[0][None]  # BHWC batch
        x = np.ascontiguousarray(x.transpose((0, 3, 1, 2)))  # BHWC -> BCHW
        x = paddle.to_tensor(data=x).to(p.place).astype(dtype=p.dtype) / 255
        t.append(time_sync())
        with paddle.amp.auto_cast(enable=autocast):
            y = self.model(x, augment, profile)  # raw predictions
            t.append(time_sync())
            y = non_max_suppression(y if self.dmb else y[0], self.conf,
                iou_thres=self.iou, classes=self.classes, agnostic=self.
                agnostic, multi_label=self.multi_label, max_det=self.max_det)
            for i in range(n):
                # Map boxes back from the padded shape to each original image.
                scale_coords(shape1, y[i][:, :4], shape0[i])
            t.append(time_sync())
            return Detections(imgs, y, files, t, self.names, tuple(x.shape))


class Detections:
    """Container for YOLOv5 inference results with display/save/crop/pandas helpers."""

    def __init__(self, imgs, pred, files, times=(0, 0, 0, 0), names=None,
        shape=None):
        """imgs: list of images (numpy); pred: per-image (n, 6) tensors of
        [xyxy, conf, cls]; files: filenames; times: the four timestamps
        collected by AutoShape.forward; shape: inference batch shape."""
        super().__init__()
        d = pred[0].place  # device of the predictions
        # Per-image normalization gains [w, h, w, h, 1, 1].
        gn = [paddle.to_tensor(data=[*(tuple(im.shape)[i] for i in [1, 0, 1,
            0]), 1, 1], place=d) for im in imgs]
        self.imgs = imgs
        self.pred = pred
        self.names = names
        self.files = files
        self.times = times
        self.xyxy = pred  # boxes in pixel xyxy
        self.xywh = [xyxy2xywh(x) for x in pred]  # boxes in pixel xywh
        self.xyxyn = [(x / g) for x, g in zip(self.xyxy, gn)]  # normalized 0-1
        self.xywhn = [(x / g) for x, g in zip(self.xywh, gn)]  # normalized 0-1
        self.n = len(self.pred)  # number of images
        # Per-stage milliseconds per image: pre-process, inference, NMS.
        self.t = tuple((times[i + 1] - times[i]) * 1000 / self.n for i in
            range(3))
        self.s = shape

    def display(self, pprint=False, show=False, save=False, crop=False,
        render=False, save_dir=Path('')):
        """Shared worker behind print()/show()/save()/crop()/render()."""
        crops = []
        for i, (im, pred) in enumerate(zip(self.imgs, self.pred)):
            s = (
                f'image {i + 1}/{len(self.pred)}: {tuple(im.shape)[0]}x{tuple(im.shape)[1]} '
                )
            if tuple(pred.shape)[0]:
                # Summarize detections per class in the log string.
                for c in pred[:, -1].unique():
                    n = (pred[:, -1] == c).sum()
                    s += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, "
                if show or save or render or crop:
                    annotator = Annotator(im, example=str(self.names))
                    for *box, conf, cls in reversed(pred):
                        label = f'{self.names[int(cls)]} {conf:.2f}'
                        if crop:
                            file = save_dir / 'crops' / self.names[int(cls)
                                ] / self.files[i] if save else None
                            crops.append({'box': box, 'conf': conf, 'cls':
                                cls, 'label': label, 'im': save_one_box(box,
                                im, file=file, save=save)})
                        else:
                            annotator.box_label(box, label, color=colors(cls))
                    im = annotator.im
            else:
                s += '(no detections)'
            im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.
                ndarray) else im
            if pprint:
                LOGGER.info(s.rstrip(', '))
            if show:
                im.show(self.files[i])
            if save:
                f = self.files[i]
                im.save(save_dir / f)
                if i == self.n - 1:  # log once, after the last image
                    LOGGER.info(
                        f"Saved {self.n} image{'s' * (self.n > 1)} to {colorstr('bold', save_dir)}"
                        )
            if render:
                self.imgs[i] = np.asarray(im)
        if crop:
            if save:
                LOGGER.info(f'Saved results to {save_dir}\n')
            return crops

    def print(self):
        """Log a per-image summary and the speed breakdown."""
        self.display(pprint=True)
        LOGGER.info(
            f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {tuple(self.s)}'
             % self.t)

    def show(self):
        """Open each annotated image in the default viewer."""
        self.display(show=True)

    def save(self, save_dir='runs/detect/exp'):
        """Save annotated images to an auto-incremented save_dir."""
        save_dir = increment_path(save_dir, exist_ok=save_dir !=
            'runs/detect/exp', mkdir=True)
        self.display(save=True, save_dir=save_dir)

    def crop(self, save=True, save_dir='runs/detect/exp'):
        """Return per-detection crops, optionally saving them under save_dir."""
        save_dir = increment_path(save_dir, exist_ok=save_dir !=
            'runs/detect/exp', mkdir=True) if save else None
        return self.display(crop=True, save=save, save_dir=save_dir)

    def render(self):
        """Draw boxes onto self.imgs in place and return them."""
        self.display(render=True)
        return self.imgs

    def pandas(self):
        """Return a copy with xyxy/xyxyn/xywh/xywhn converted to DataFrames."""
        new = copy(self)
        ca = 'xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class', 'name'
        cb = ('xcenter', 'ycenter', 'width', 'height', 'confidence',
            'class', 'name')
        for k, c in zip(['xyxy', 'xyxyn', 'xywh', 'xywhn'], [ca, ca, cb, cb]):
            # Append int class id and class name to each detection row.
            a = [[(x[:5] + [int(x[5]), self.names[int(x[5])]]) for x in x.
                tolist()] for x in getattr(self, k)]
            setattr(new, k, [pd.DataFrame(x, columns=c) for x in a])
        return new

    def tolist(self):
        """Split this batch into a list of single-image Detections objects."""
        r = range(self.n)
        x = [Detections([self.imgs[i]], [self.pred[i]], [self.files[i]],
            self.times, self.names, self.s) for i in r]
        return x

    def __len__(self):
        return self.n


class Classify(paddle.nn.Layer):
    """Classification head: global average pool -> conv -> flatten."""

    def __init__(self, c1, c2, k=1, s=1, p=None, g=1):
        super().__init__()
        self.aap = paddle.nn.AdaptiveAvgPool2D(output_size=1)  # to (b, c1, 1, 1)
        self.conv = paddle.nn.Conv2D(in_channels=c1, out_channels=c2,
            kernel_size=k, stride=s, padding=autopad(k, p), groups=g)
        self.flat = paddle.nn.Flatten()

    def forward(self, x):
        """Accepts a single tensor or a list of tensors (pooled then concatenated)."""
        inputs = x if isinstance(x, list) else [x]
        pooled = paddle.concat(x=[self.aap(item) for item in inputs], axis=1)
        return self.flat(self.conv(pooled))


""" mobilenet """


class BottleneckMOB(paddle.nn.Layer):
    """MobileNetV2-style inverted residual block with optional SE attention."""

    def __init__(self, c1, c2, s, expand_ratio, use_se=False):
        super().__init__()
        self.s = s
        self.use_se = use_se
        hidden_dim = round(c1 * expand_ratio)
        # Residual only at stride 1 with matching channels.
        self.use_res_connect = self.s == 1 and c1 == c2
        layers = []
        if expand_ratio != 1:
            # 1x1 point-wise expansion.
            layers += [
                paddle.nn.Conv2D(in_channels=c1, out_channels=hidden_dim,
                    kernel_size=1, stride=1, padding=0, bias_attr=False),
                paddle.nn.BatchNorm2D(num_features=hidden_dim),
                paddle.nn.ReLU6(),
            ]
        layers += [
            # 3x3 depth-wise convolution.
            paddle.nn.Conv2D(in_channels=hidden_dim, out_channels=hidden_dim,
                kernel_size=3, stride=s, padding=1, groups=hidden_dim,
                bias_attr=False),
            paddle.nn.BatchNorm2D(num_features=hidden_dim),
            paddle.nn.ReLU6(),
            # 1x1 point-wise linear projection (no activation).
            paddle.nn.Conv2D(in_channels=hidden_dim, out_channels=c2,
                kernel_size=1, stride=1, padding=0, bias_attr=False),
            paddle.nn.BatchNorm2D(num_features=c2),
        ]
        self.conv = paddle.nn.Sequential(*layers)
        if use_se:
            self.se = SELayer(c2)

    def forward(self, x):
        out = self.conv(x)
        if self.use_se:
            out = self.se(out)
        return x + out if self.use_res_connect else out


class CBM(paddle.nn.Layer):
    """Conv -> BatchNorm -> Mish activation, computed inline as x*tanh(softplus(x))."""

    def __init__(self, in_channels, out_channels, kernel_size, stride, bias
        =False):
        super().__init__()
        # `bias` is accepted for interface compatibility; the Conv2D keeps its
        # default bias, matching the original implementation.
        self.conv = paddle.nn.Conv2D(in_channels=in_channels,
            out_channels=out_channels, kernel_size=kernel_size, stride=stride,
            padding=(kernel_size - 1) // 2)
        self.bn = paddle.nn.BatchNorm2D(num_features=out_channels)

    def forward(self, x):
        y = self.bn(self.conv(x))
        # Mish: y * tanh(softplus(y)).
        return y * paddle.nn.functional.tanh(x=paddle.nn.functional.softplus(x=y))


class CBL(paddle.nn.Layer):
    """Conv2D + BatchNorm + ReLU6.

    Bug fix: the ``bias`` flag was accepted but silently ignored, so the
    convolution always carried a bias that is redundant directly in front
    of BatchNorm. The flag is now forwarded as ``bias_attr`` (default
    ``False`` disables the conv bias, matching the declared intent).
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride,
        bias=False):
        super().__init__()
        # (kernel_size - 1) // 2 gives "same" padding for odd kernels.
        self.conv = paddle.nn.Conv2D(in_channels=in_channels, out_channels=
            out_channels, kernel_size=kernel_size, stride=stride, padding=(
            kernel_size - 1) // 2, bias_attr=bias)
        self.bn = paddle.nn.BatchNorm2D(num_features=out_channels)
        self.act = paddle.nn.ReLU6()

    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        x = self.act(x)
        return x


class DepthWiseConv(paddle.nn.Layer):
    """Depthwise-separable convolution: a per-channel 3x3 depthwise conv
    followed by a 1x1 pointwise conv (no normalization or activation)."""

    def __init__(self, in_ch, out_ch):
        super(DepthWiseConv, self).__init__()
        # 3x3 spatial conv applied per channel (groups == in_ch).
        self.depth_conv = paddle.nn.Conv2D(
            in_channels=in_ch,
            out_channels=in_ch,
            kernel_size=3,
            stride=1,
            padding=1,
            groups=in_ch)
        # 1x1 conv that mixes channels in_ch -> out_ch.
        self.point_conv = paddle.nn.Conv2D(
            in_channels=in_ch,
            out_channels=out_ch,
            kernel_size=1,
            stride=1,
            padding=0,
            groups=1)

    def forward(self, input):
        return self.point_conv(self.depth_conv(input))


class BSBottleneck(paddle.nn.Layer):
    """Inverted-residual bottleneck built from BSConvU blocks, with an
    optional SE attention on the output and a residual connection when
    stride == 1 and the channel count is unchanged."""

    def __init__(self, c1, c2, s, expand_ratio, use_se=False):
        super(BSBottleneck, self).__init__()
        self.s = s
        self.use_se = use_se
        hidden_dim = round(c1 * expand_ratio)
        # Residual add only when spatial size and channels are preserved.
        self.use_res_connect = self.s == 1 and c1 == c2
        if expand_ratio == 1:
            # No expansion step: 3x3 BSConv then 1x1 projection.
            layers = [
                BSConvU(hidden_dim, hidden_dim, 3, s, 1),
                paddle.nn.BatchNorm2D(num_features=hidden_dim),
                paddle.nn.ReLU6(),
                BSConvU(hidden_dim, c2, 1, 1, 0),
                paddle.nn.BatchNorm2D(num_features=c2),
            ]
        else:
            # Expand (1x1) -> 3x3 BSConv -> project (1x1).
            layers = [
                BSConvU(c1, hidden_dim, 1, 1, 0),
                paddle.nn.BatchNorm2D(num_features=hidden_dim),
                paddle.nn.ReLU6(),
                BSConvU(hidden_dim, hidden_dim, 3, s, 1),
                paddle.nn.BatchNorm2D(num_features=hidden_dim),
                paddle.nn.ReLU6(),
                BSConvU(hidden_dim, c2, 1, 1, 0),
                paddle.nn.BatchNorm2D(num_features=c2),
            ]
        self.conv = paddle.nn.Sequential(*layers)
        if use_se:
            self.se = SELayer(c2)

    def forward(self, x):
        out = self.conv(x)
        if self.use_se:
            out = self.se(out)
        return x + out if self.use_res_connect else out


class BSConvU(paddle.nn.Layer):
    """Blueprint-separable convolution, unconstrained ('U') variant:
    1x1 pointwise conv -> BatchNorm -> depthwise conv with the requested
    kernel/stride/padding."""

    def __init__(self, in_ch, out_ch, kernel=1, stride=1, padding=0,
        bias=False):
        super(BSConvU, self).__init__()
        # 1x1 channel mixing in_ch -> out_ch.
        self.point_conv = paddle.nn.Conv2D(
            in_channels=in_ch,
            out_channels=out_ch,
            kernel_size=1,
            stride=1,
            padding=0,
            groups=1,
            bias_attr=bias)
        self.bn = paddle.nn.BatchNorm2D(num_features=out_ch)
        # Per-channel spatial conv (groups == out_ch).
        self.depth_conv = paddle.nn.Conv2D(
            in_channels=out_ch,
            out_channels=out_ch,
            kernel_size=kernel,
            stride=stride,
            padding=padding,
            groups=out_ch,
            bias_attr=bias)

    def forward(self, input):
        return self.depth_conv(self.bn(self.point_conv(input)))


class BSConvS(paddle.nn.Layer):
    """Blueprint-separable convolution, subspace ('S') variant:
    two 1x1 pointwise convs followed by a depthwise conv.

    Bug fix: the original applied the single ``self.bn`` module after BOTH
    pointwise convs, so the two normalization sites shared learnable
    parameters and running statistics. Each site now has its own
    BatchNorm layer (``bn`` and ``bn2``). The constructor signature and
    forward contract are unchanged.
    """

    def __init__(self, in_ch, out_ch, kernel=1, stride=1, padding=0,
        bias=False):
        super(BSConvS, self).__init__()
        # First 1x1 pointwise: channel mixing in_ch -> out_ch.
        self.point_conv1 = paddle.nn.Conv2D(in_channels=in_ch, out_channels
            =out_ch, kernel_size=1, stride=1, padding=0, groups=1,
            bias_attr=bias)
        self.bn = paddle.nn.BatchNorm2D(num_features=out_ch)
        # Second 1x1 pointwise refinement.
        self.point_conv2 = paddle.nn.Conv2D(in_channels=out_ch,
            out_channels=out_ch, kernel_size=1, stride=1, padding=0, groups
            =1, bias_attr=bias)
        # Dedicated BN for the second pointwise output (was shared with
        # self.bn in the original, which is almost certainly unintended).
        self.bn2 = paddle.nn.BatchNorm2D(num_features=out_ch)
        # Per-channel spatial conv (groups == out_ch).
        self.depth_conv = paddle.nn.Conv2D(in_channels=out_ch, out_channels
            =out_ch, kernel_size=kernel, stride=stride, padding=padding,
            groups=out_ch, bias_attr=bias)

    def forward(self, input):
        out = self.point_conv1(input)
        out = self.bn(out)
        out = self.point_conv2(out)
        out = self.bn2(out)
        out = self.depth_conv(out)
        return out


class Conv3BN(paddle.nn.Layer):
    """3x3 Conv2D + BatchNorm + h_swish.

    Equivalent to ``Sequential(Conv2d(inp, oup, 3, stride, 1, bias=False),
    BatchNorm2d(oup), h_swish())`` but kept as the named sublayers
    ``conv``/``bn``/``act`` so the conv/bn pair can be fused externally;
    ``fuseforward`` skips the BN after fusion.
    """

    def __init__(self, inp, oup, stride):
        super(Conv3BN, self).__init__()
        self.conv = paddle.nn.Conv2D(
            in_channels=inp,
            out_channels=oup,
            kernel_size=3,
            stride=stride,
            padding=1,
            bias_attr=False)
        self.bn = paddle.nn.BatchNorm2D(num_features=oup)
        self.act = h_swish()

    def forward(self, x):
        y = self.conv(x)
        y = self.bn(y)
        return self.act(y)

    def fuseforward(self, x):
        # Used once the BN statistics have been folded into conv weights.
        return self.act(self.conv(x))


class InvertedResidual(paddle.nn.Layer):
    """MobileNetV3-style inverted residual block with optional SE
    attention and h-swish activation; the identity shortcut is used when
    stride == 1 and inp == oup."""

    def __init__(self, inp, oup, hidden_dim, kernel_size, stride, use_se,
        use_hs):
        super(InvertedResidual, self).__init__()
        assert stride in [1, 2]
        self.identity = stride == 1 and inp == oup

        # Factories so that every slot gets its own fresh module instance,
        # exactly as the original inline expressions did.
        def _act():
            return h_swish() if use_hs else paddle.nn.ReLU()

        def _se(ch):
            return SELayer(ch) if use_se else paddle.nn.Sequential()

        if inp == hidden_dim:
            # No expansion: depthwise -> act -> SE -> linear pointwise.
            layers = [
                paddle.nn.Conv2D(in_channels=hidden_dim,
                    out_channels=hidden_dim, kernel_size=kernel_size,
                    stride=stride, padding=(kernel_size - 1) // 2,
                    groups=hidden_dim, bias_attr=False),
                paddle.nn.BatchNorm2D(num_features=hidden_dim),
                _act(),
                _se(hidden_dim),
                paddle.nn.Conv2D(in_channels=hidden_dim, out_channels=oup,
                    kernel_size=1, stride=1, padding=0, bias_attr=False),
                paddle.nn.BatchNorm2D(num_features=oup),
            ]
        else:
            # Expand (1x1) -> depthwise -> SE -> act -> linear pointwise.
            layers = [
                paddle.nn.Conv2D(in_channels=inp, out_channels=hidden_dim,
                    kernel_size=1, stride=1, padding=0, bias_attr=False),
                paddle.nn.BatchNorm2D(num_features=hidden_dim),
                _act(),
                paddle.nn.Conv2D(in_channels=hidden_dim,
                    out_channels=hidden_dim, kernel_size=kernel_size,
                    stride=stride, padding=(kernel_size - 1) // 2,
                    groups=hidden_dim, bias_attr=False),
                paddle.nn.BatchNorm2D(num_features=hidden_dim),
                _se(hidden_dim),
                _act(),
                paddle.nn.Conv2D(in_channels=hidden_dim, out_channels=oup,
                    kernel_size=1, stride=1, padding=0, bias_attr=False),
                paddle.nn.BatchNorm2D(num_features=oup),
            ]
        self.conv = paddle.nn.Sequential(*layers)

    def forward(self, x):
        y = self.conv(x)
        return x + y if self.identity else y
