import numpy as np
import os, cv2
from ..visualize import get_color_map_list

def visualize(images, results, color_map=None, save_dir=None, weight=0.6):
    """
    Overlay segmentation predictions on their source images as pseudo-color blends.

    Args:
        images (list): Source images; each item is either a file path (str)
            or an already-loaded BGR image (np.ndarray).
        results (list): Per-image prediction label maps (np.ndarray with the
            same H x W as the corresponding image) whose values index into
            ``color_map``. Class 0 is treated as background and left unblended.
        color_map (list, optional): 256 RGB triplets used to colorize the
            predictions. Defaults to the project palette with class 1 forced
            to red and class 2 forced to blue.
        save_dir (str, optional): If given, each blended image is also written
            to this directory as ``<index>.jpg``. Default: None.
        weight (float): Blend weight of the original image; the prediction
            overlay gets ``1 - weight``. Default: 0.6.

    Returns:
        list[np.ndarray]: The blended visualizations, one per input image
        (always returned, regardless of ``save_dir``).
    """
    show_imgs = []
    if color_map is None:
        color_map = get_color_map_list(256)
        color_map[1] = [255, 0, 0]
        color_map[2] = [0, 0, 255]
    color_map = np.array(color_map).astype("uint8")
    if save_dir is not None:
        os.makedirs(save_dir, exist_ok=True)
    for idx, (image, result) in enumerate(zip(images, results)):
        # cv2.LUT requires an 8-bit source; predictions often arrive as
        # int32/int64, which would raise. Cast (no copy when already uint8).
        result = result.astype("uint8", copy=False)
        # Use OpenCV LUT for color mapping, one channel at a time.
        c1 = cv2.LUT(result, color_map[:, 0])
        c2 = cv2.LUT(result, color_map[:, 1])
        c3 = cv2.LUT(result, color_map[:, 2])
        # Stack reversed (R,G,B -> B,G,R) to match OpenCV's channel order.
        pseudo_img = np.dstack((c3, c2, c1))
        im = cv2.imread(image) if isinstance(image, str) else image
        vis_result = cv2.addWeighted(im, weight, pseudo_img, 1 - weight, 0)
        # Restore background (class 0) pixels so only predictions are tinted.
        vis_result[result == 0] = im[result == 0]
        if save_dir is not None:
            # Honor the documented save behavior, previously unimplemented.
            cv2.imwrite(os.path.join(save_dir, "{}.jpg".format(idx)), vis_result)
        show_imgs.append(vis_result)
    return show_imgs

# Build the model inputs
# def create_inputs(imgs, im_info):
#     """按照模型标准输入格式生成数据
#     Args:
#         imgs (list(numpy)): list of images (np.ndarray)
#         im_info (list(dict)): list of image info
#     Returns:
#         inputs (dict): input of model, {image:data, scale_factor:data, im_shape:data}
#     """
#     inputs = {}

#     im_shape = []
#     scale_factor = []
#     if len(imgs) == 1:
#         inputs['image'] = np.array((imgs[0], )).astype('float32')
#         inputs['im_shape'] = np.array(
#             (im_info[0]['im_shape'], )).astype('float32')
#         inputs['scale_factor'] = np.array(
#             (im_info[0]['scale_factor'], )).astype('float32')
#         return inputs

#     for e in im_info:
#         im_shape.append(np.array((e['im_shape'], )).astype('float32'))
#         scale_factor.append(np.array((e['scale_factor'], )).astype('float32'))

#     inputs['im_shape'] = np.concatenate(im_shape, axis=0)
#     inputs['scale_factor'] = np.concatenate(scale_factor, axis=0)

#     imgs_shape = [[e.shape[1], e.shape[2]] for e in imgs]
#     max_shape_h = max([e[0] for e in imgs_shape])
#     max_shape_w = max([e[1] for e in imgs_shape])
#     padding_imgs = []
#     for img in imgs:
#         im_c, im_h, im_w = img.shape[:]
#         padding_im = np.zeros(
#             (im_c, max_shape_h, max_shape_w), dtype=np.float32)
#         padding_im[:, :im_h, :im_w] = img
#         padding_imgs.append(padding_im)
#     inputs['image'] = np.stack(padding_imgs, axis=0)
#     return inputs


# Build the predictor, configured for the chosen device and run_mode
# def load_predictor(model_dir,
#                    run_mode='paddle',
#                    batch_size=1,
#                    device='CPU',
#                    min_subgraph_size=3,
#                    use_dynamic_shape=False,
#                    trt_min_shape=[100,100],
#                    trt_max_shape=[3000,2000],
#                    trt_opt_shape=[1024,512],
#                    trt_calib_mode=False,
#                    cpu_threads=1,
#                    enable_mkldnn=False,
#                    enable_mkldnn_bfloat16=False,
#                    delete_shuffle_pass=False):
#     """ 读取模型并根据配置构建预测器
#     """
#     device_split = device.split(":")
#     if len(device_split) == 1:
#         device_id = 0
#     else:
#         device_id = int(device_split[1])
#         device = device_split[0]
#     from paddle.inference import Config, create_predictor
#     if device != 'GPU' and run_mode != 'paddle':
#         raise ValueError(
#             "Predict by TensorRT mode: {}, expect device=='GPU', but device == {}"
#             .format(run_mode, device))
#     infer_model = os.path.join(model_dir, 'model.pdmodel') # 读取模型
#     infer_params = os.path.join(model_dir, 'model.pdiparams') # 读取参数
#     if not os.path.exists(infer_model):
#         infer_model = os.path.join(model_dir, 'inference.pdmodel')
#         infer_params = os.path.join(model_dir, 'inference.pdiparams')
#         if not os.path.exists(infer_model):
#             raise ValueError(
#                 "Cannot find any inference model in dir: {},".format(model_dir))
#     config = Config(infer_model, infer_params) # 根据模型和相关参数构建配置类对象，Config类来自 paddle.inference
#     if device == 'GPU':
#         # initial GPU memory(M), device ID
#         config.enable_use_gpu(200, device_id) # 初始化GPU
#         # optimize graph and fuse op 优化图，同时将可以合并的操作进行合并
#         config.switch_ir_optim(True)
#     elif device == 'XPU':
#         config.enable_lite_engine()
#         config.enable_xpu(10 * 1024 * 1024)
#     else:
#         config.disable_gpu()
#         config.set_cpu_math_library_num_threads(cpu_threads)
#         if enable_mkldnn:
#             try:
#                 # cache 10 different shapes for mkldnn to avoid memory leak
#                 config.set_mkldnn_cache_capacity(10)
#                 config.enable_mkldnn()
#                 if enable_mkldnn_bfloat16:
#                     config.enable_mkldnn_bfloat16()
#             except Exception as e:
#                 print(
#                     "The current environment does not support `mkldnn`, so disable mkldnn."
#                 )
#                 pass
#     # TensorRT的相关配置
#     precision_map = {
#         'trt_int8': Config.Precision.Int8,
#         'trt_fp32': Config.Precision.Float32,
#         'trt_fp16': Config.Precision.Half
#     }
#     if run_mode in precision_map.keys(): # 如果运行模式启用了TensorRT，则需要启用相关引擎
#         config.enable_tensorrt_engine(
#             workspace_size=(1 << 25) * batch_size,
#             max_batch_size=batch_size,
#             min_subgraph_size=min_subgraph_size,
#             precision_mode=precision_map[run_mode],
#             use_static=True,
#             use_calib_mode=trt_calib_mode)

#         if use_dynamic_shape: # 在TensorRT模式下，如果启用了动态图则需要进行相关配置
#             min_input_shape = {
#                 'x': [batch_size, 3, trt_min_shape[1], trt_min_shape[0]]
#             }
#             max_input_shape = {
#                 'x': [batch_size, 3, trt_max_shape[1], trt_max_shape[0]]
#             }
#             opt_input_shape = {
#                 'x': [batch_size, 3, trt_opt_shape[1], trt_opt_shape[0]]
#             }
#             config.set_trt_dynamic_shape_info(min_input_shape, max_input_shape,
#                                               opt_input_shape)
#             print('trt set dynamic shape done!')

#     # disable print log when predict 关闭控制台输出
#     config.disable_glog_info()
#     # enable shared memory 启用共享内存
#     config.enable_memory_optim()
#     # disable feed, fetch OP, needed by zero_copy_run
#     config.switch_use_feed_fetch_ops(False)
#     if delete_shuffle_pass:
#         config.delete_pass("shuffle_channel_detect_pass")
#     predictor = create_predictor(config) # 构建预测器
#     return predictor, config

