import numpy as np
import os, cv2

# Builds the model-input dict for the Paddle-Lite backend.
def create_inputs_lite(imgs, im_info, batch_size):
    """Build the model input dict for a Paddle-Lite predictor.

    Args:
        imgs (list[np.ndarray]): preprocessed images, each shaped (C, H, W).
        im_info (list[dict]): per-image info dicts carrying 'im_shape' and
            'scale_factor' entries.
        batch_size (int): number of rows allocated in the padded image batch
            (extra rows stay zero).

    Returns:
        dict: {'image': ndarray, 'im_shape': ndarray, 'scale_factor': ndarray}
    """
    inputs = {}

    # Fast path: a single image is placed at batch row 0 of a zeroed batch.
    if len(imgs) == 1:
        im_c, im_h, im_w = imgs[0].shape[:]
        padding_im = np.zeros((batch_size, im_c, im_h, im_w), dtype=np.float32)
        padding_im[0, ...] = imgs[0]
        inputs['image'] = padding_im
        inputs['im_shape'] = np.array((im_info[0]['im_shape'], )).astype('float32')
        inputs['scale_factor'] = np.array((im_info[0]['scale_factor'], )).astype('float32')
        return inputs

    im_shape = [np.array((e['im_shape'], )).astype('float32') for e in im_info]
    scale_factor = [np.array((e['scale_factor'], )).astype('float32') for e in im_info]
    inputs['im_shape'] = np.concatenate(im_shape, axis=0)
    inputs['scale_factor'] = np.concatenate(scale_factor, axis=0)

    # Zero-pad every image (bottom/right) to the batch-wide max H and W so
    # they share one contiguous batch tensor.
    max_shape_h = max(e.shape[1] for e in imgs)
    max_shape_w = max(e.shape[2] for e in imgs)
    # Use the actual channel count instead of a hard-coded 3 so non-RGB
    # inputs (e.g. single-channel) are padded correctly as well.
    im_c = imgs[0].shape[0]
    padding_imgs = np.zeros((batch_size, im_c, max_shape_h, max_shape_w),
                            dtype=np.float32)
    for idx_, img in enumerate(imgs):
        _, im_h, im_w = img.shape[:]
        padding_imgs[idx_, :, :im_h, :im_w] = img
    inputs['image'] = padding_imgs
    return inputs

# Builds the model-input dict for the standard Paddle Inference backend.
def create_inputs(imgs, im_info):
    """Assemble the standard model input dict.

    Args:
        imgs (list[np.ndarray]): preprocessed images, each shaped (C, H, W).
        im_info (list[dict]): per-image info dicts carrying 'im_shape' and
            'scale_factor' entries.

    Returns:
        dict: {'image': ndarray, 'im_shape': ndarray, 'scale_factor': ndarray}
    """
    # Single image: each field just gains a leading batch axis of size 1.
    if len(imgs) == 1:
        return {
            'image': np.array((imgs[0], )).astype('float32'),
            'im_shape': np.array((im_info[0]['im_shape'], )).astype('float32'),
            'scale_factor':
                np.array((im_info[0]['scale_factor'], )).astype('float32'),
        }

    inputs = {}
    inputs['im_shape'] = np.concatenate(
        [np.array((info['im_shape'], )).astype('float32') for info in im_info],
        axis=0)
    inputs['scale_factor'] = np.concatenate(
        [np.array((info['scale_factor'], )).astype('float32')
         for info in im_info],
        axis=0)

    # Zero-pad each image (bottom/right) to the batch-wide max H and W,
    # then stack into one (N, C, H, W) tensor.
    tallest = max(im.shape[1] for im in imgs)
    widest = max(im.shape[2] for im in imgs)
    padded = []
    for im in imgs:
        channels, height, width = im.shape[:]
        canvas = np.zeros((channels, tallest, widest), dtype=np.float32)
        canvas[:, :height, :width] = im
        padded.append(canvas)
    inputs['image'] = np.stack(padded, axis=0)
    return inputs


# Builds a predictor according to the configured device and run_mode.
def load_predictor(model_dir,
                   run_mode='paddle',
                   batch_size=1,
                   device='CPU',
                   min_subgraph_size=3,
                   use_dynamic_shape=False,
                   trt_min_shape=1,
                   trt_max_shape=1280,
                   trt_opt_shape=640,
                   trt_calib_mode=False,
                   cpu_threads=1,
                   enable_mkldnn=False,
                   enable_mkldnn_bfloat16=False,
                   delete_shuffle_pass=False):
    """Load an inference model and build a predictor for it.

    Args:
        model_dir (str): directory holding the model files
            ('model.nb' for the ascend/Paddle-Lite path; otherwise
            'model.pdmodel'/'model.pdiparams' or
            'inference.pdmodel'/'inference.pdiparams').
        run_mode (str): 'paddle' or a TensorRT mode
            ('trt_int8' / 'trt_fp32' / 'trt_fp16').
        batch_size (int): max batch size, used for the TensorRT engine config.
        device (str): 'cuda', 'XPU', 'ascend', or anything else for CPU.
            May carry a device id suffix, e.g. 'cuda:1'.
        min_subgraph_size (int): minimum subgraph size handed to TensorRT.
        use_dynamic_shape (bool): configure TensorRT dynamic input shapes.
        trt_min_shape/trt_max_shape/trt_opt_shape (int): H/W bounds for the
            dynamic-shape 'image' input.
        trt_calib_mode (bool): enable TensorRT INT8 calibration.
        cpu_threads (int): CPU math-library thread count (CPU path only).
        enable_mkldnn (bool): enable MKL-DNN on the CPU path (best effort).
        enable_mkldnn_bfloat16 (bool): enable bfloat16 when MKL-DNN is on.
        delete_shuffle_pass (bool): drop 'shuffle_channel_detect_pass'.

    Returns:
        tuple: (predictor, config).

    Raises:
        ValueError: when no model file is found, or a TensorRT run_mode is
            requested on a non-cuda device.
    """
    # Split an optional trailing device id, e.g. 'cuda:1' -> ('cuda', 1).
    device_split = device.split(":")
    if len(device_split) == 1:
        device_id = 0
    else:
        device_id = int(device_split[1])
        device = device_split[0]
    # Paddle-Lite path (Huawei Ascend NPU).
    if device == "ascend":
        from paddlelite.lite import MobileConfig, create_paddle_predictor
        infer_model = os.path.join(model_dir, 'model.nb')  # load model
        if not os.path.exists(infer_model):
            raise ValueError("Cannot find any inference model in dir: {},".format(model_dir))
        config = MobileConfig()
        # Directory for the serialized (cached) NNAdapter model.
        cache_dir = f"{model_dir}/cache"
        if not os.path.exists(cache_dir):
            os.makedirs(cache_dir)
        config.set_nnadapter_model_cache_dir(cache_dir)

        config.set_model_from_file(infer_model)
        config.set_nnadapter_device_names(["huawei_ascend_npu"])
        predictor = create_paddle_predictor(config)
        return predictor, config
    # Paddle / TensorRT path.
    from paddle.inference import Config, create_predictor
    if device != 'cuda' and run_mode != 'paddle':
        # Message fixed to match the actual check: TensorRT needs device=='cuda'.
        raise ValueError(
            "Predict by TensorRT mode: {}, expect device=='cuda', but device == {}"
            .format(run_mode, device))
    infer_model = os.path.join(model_dir, 'model.pdmodel')  # model topology
    infer_params = os.path.join(model_dir, 'model.pdiparams')  # model weights
    if not os.path.exists(infer_model):
        # Fall back to the alternative 'inference.*' naming convention.
        infer_model = os.path.join(model_dir, 'inference.pdmodel')
        infer_params = os.path.join(model_dir, 'inference.pdiparams')
        if not os.path.exists(infer_model):
            raise ValueError(
                "Cannot find any inference model in dir: {},".format(model_dir))
    # Config comes from paddle.inference and drives predictor construction.
    config = Config(infer_model, infer_params)
    if device == 'cuda':
        # initial GPU memory (MB) and device id
        config.enable_use_gpu(200, device_id)
        # optimize the graph and fuse ops where possible
        config.switch_ir_optim(True)
    elif device == 'XPU':
        config.enable_lite_engine()
        config.enable_xpu(10 * 1024 * 1024)
    else:
        config.disable_gpu()
        config.set_cpu_math_library_num_threads(cpu_threads)
        if enable_mkldnn:
            try:
                # cache 10 different shapes for mkldnn to avoid memory leak
                config.set_mkldnn_cache_capacity(10)
                config.enable_mkldnn()
                if enable_mkldnn_bfloat16:
                    config.enable_mkldnn_bfloat16()
            except Exception:
                # Best-effort: fall back to plain CPU when MKL-DNN is absent.
                print(
                    "The current environment does not support `mkldnn`, so disable mkldnn."
                )
    # TensorRT configuration.
    precision_map = {
        'trt_int8': Config.Precision.Int8,
        'trt_fp32': Config.Precision.Float32,
        'trt_fp16': Config.Precision.Half
    }
    if run_mode in precision_map.keys():
        # A TensorRT run_mode was requested: enable the TRT engine.
        config.enable_tensorrt_engine(
            workspace_size=(1 << 25) * batch_size,
            max_batch_size=batch_size,
            min_subgraph_size=min_subgraph_size,
            precision_mode=precision_map[run_mode],
            use_static=True,
            use_calib_mode=trt_calib_mode)

        if use_dynamic_shape:
            # Dynamic-shape bounds for the 'image' input (square H==W here).
            min_input_shape = {
                'image': [batch_size, 3, trt_min_shape, trt_min_shape]
            }
            max_input_shape = {
                'image': [batch_size, 3, trt_max_shape, trt_max_shape]
            }
            opt_input_shape = {
                'image': [batch_size, 3, trt_opt_shape, trt_opt_shape]
            }
            config.set_trt_dynamic_shape_info(min_input_shape, max_input_shape,
                                              opt_input_shape)
            print('trt set dynamic shape done!')

    # disable print log when predicting
    config.disable_glog_info()
    # enable shared memory
    config.enable_memory_optim()
    # disable feed/fetch OPs, needed by zero_copy_run
    config.switch_use_feed_fetch_ops(False)
    if delete_shuffle_pass:
        config.delete_pass("shuffle_channel_detect_pass")
    predictor = create_predictor(config)  # build the predictor
    return predictor, config

def multiclass_nms(bboxs, num_classes, match_threshold=0.6, match_metric='iou'):
    """Run NMS independently for each class.

    Args:
        bboxs: array of rows [class_id, score, x1, y1, x2, y2].
        num_classes: number of classes to scan (class ids 0..num_classes-1).
        match_threshold: overlap threshold forwarded to `nms`.
        match_metric: 'iou' or 'ios', forwarded to `nms`.

    Returns:
        list[np.ndarray]: one array per class that had detections, with the
        leading class-id column restored on the surviving rows.
    """
    kept = []
    for cls in range(num_classes):
        mask = bboxs[:, 0] == cls
        if not np.count_nonzero(mask):
            continue
        survivors = nms(bboxs[mask, 1:], match_threshold, match_metric)
        cls_col = np.full((survivors.shape[0], 1), cls)
        kept.append(np.concatenate([cls_col, survivors], 1))
    return kept

def nms(dets, match_threshold=0.6, match_metric='iou'):
    """Greedy non-maximum suppression over one set of boxes.

    Args:
        dets: array of shape [N, 5], rows [score, x1, y1, x2, y2].
        match_threshold: overlap value at or above which the lower-scoring
            box is suppressed.
        match_metric: 'iou' (intersection over union) or 'ios'
            (intersection over the smaller box area).

    Returns:
        The surviving rows of ``dets``, kept in their original row order.

    Raises:
        ValueError: for an unrecognized ``match_metric``.
    """
    if dets.shape[0] == 0:
        return dets[[], :]

    scores = dets[:, 0]
    x1 = dets[:, 1]
    y1 = dets[:, 2]
    x2 = dets[:, 3]
    y2 = dets[:, 4]
    # The +1 follows the classic pixel-inclusive box-area convention.
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    # Visit boxes from highest to lowest score.
    order = scores.argsort()[::-1]

    n = dets.shape[0]
    suppressed = np.zeros((n), dtype=np.int32)

    for rank_i in range(n):
        i = order[rank_i]
        if suppressed[i] == 1:
            continue
        # Compare the current keeper against every lower-ranked survivor.
        for rank_j in range(rank_i + 1, n):
            j = order[rank_j]
            if suppressed[j] == 1:
                continue
            # Overlap rectangle between boxes i and j (may be empty).
            ox1 = max(x1[i], x1[j])
            oy1 = max(y1[i], y1[j])
            ox2 = min(x2[i], x2[j])
            oy2 = min(y2[i], y2[j])
            inter = max(0.0, ox2 - ox1 + 1) * max(0.0, oy2 - oy1 + 1)
            if match_metric == 'iou':
                match_value = inter / (areas[i] + areas[j] - inter)
            elif match_metric == 'ios':
                match_value = inter / min(areas[i], areas[j])
            else:
                raise ValueError()
            if match_value >= match_threshold:
                suppressed[j] = 1
    keep = np.where(suppressed == 0)[0]
    return dets[keep, :]