import numpy as np
import onnxruntime
import cv2
import copy
import json
import os, functools
import os.path as osp
import shutil

from tool import filesystem

def decode_image(raw_image):
    """Convert a BGR frame to RGB and build the initial im_info dict.

    Returns:
        raw_image: the untouched original frame.
        rgb: the RGB-converted copy fed to the transform pipeline.
        info: dict with the original (h, w) shape and a unit scale factor.
    """
    rgb = cv2.cvtColor(raw_image, cv2.COLOR_BGR2RGB)  # OpenCV frames are BGR; the model expects RGB
    info = {
        "im_shape": np.array(rgb.shape[:2], dtype=np.float32),
        "scale_factor": np.array([1., 1.], dtype=np.float32),
    }
    return raw_image, rgb, info


class Resize(object):
    """Resize an image to ``target_size``, optionally preserving aspect ratio.

    Args:
        target_size (int or list[int]): target [h, w]; an int becomes a square.
        keep_ratio (bool): if True, scale uniformly so the image fits inside
            target_size; otherwise stretch each axis independently.
        interp (int): OpenCV interpolation flag (default bilinear).
    """

    def __init__(self, target_size, keep_ratio=True, interp=cv2.INTER_LINEAR):
        if isinstance(target_size, int):
            target_size = [target_size, target_size]
        self.target_size = target_size
        self.keep_ratio = keep_ratio
        self.interp = interp

    def __call__(self, im, im_info):
        """Resize ``im`` and record the new shape and scale factors in ``im_info``."""
        assert len(self.target_size) == 2
        assert self.target_size[0] > 0 and self.target_size[1] > 0
        im_scale_y, im_scale_x = self.generate_scale(im)
        im = cv2.resize(
            im,
            None,
            None,
            fx=im_scale_x,
            fy=im_scale_y,
            interpolation=self.interp)
        im_info['im_shape'] = np.array(im.shape[:2]).astype('float32')
        im_info['scale_factor'] = np.array(
            [im_scale_y, im_scale_x]).astype('float32')
        return im, im_info

    def generate_scale(self, im):
        """Return (scale_y, scale_x) that maps ``im`` onto the target size."""
        origin_shape = im.shape[:2]
        if self.keep_ratio:
            # Scale the short side to the target short side, but never let the
            # long side exceed the target long side.
            im_size_min = np.min(origin_shape)
            im_size_max = np.max(origin_shape)
            target_size_min = np.min(self.target_size)
            target_size_max = np.max(self.target_size)
            im_scale = float(target_size_min) / float(im_size_min)
            if np.round(im_scale * im_size_max) > target_size_max:
                im_scale = float(target_size_max) / float(im_size_max)
            im_scale_x = im_scale
            im_scale_y = im_scale
        else:
            resize_h, resize_w = self.target_size
            im_scale_y = resize_h / float(origin_shape[0])
            im_scale_x = resize_w / float(origin_shape[1])
        return im_scale_y, im_scale_x


class NormalizeImage(object):
    """Normalize an HWC image: optionally scale to [0, 1], then apply
    per-channel (x - mean) / std when ``norm_type == 'mean_std'``.

    Args:
        mean (list[float]): per-channel mean subtracted from the image.
        std (list[float]): per-channel std the image is divided by.
        is_scale (bool): if True, divide pixel values by 255 first.
        norm_type (str): 'mean_std' applies mean/std; any other value skips it.
    """

    def __init__(self, mean, std, is_scale=True, norm_type='mean_std'):
        self.mean = mean
        self.std = std  # BUGFIX: was `std.std = std`, which raised AttributeError and never set self.std
        self.is_scale = is_scale
        self.norm_type = norm_type

    def __call__(self, im, im_info):
        """
        Args:
            im (np.ndarray): image (np.ndarray)
            im_info (dict): info of image
        Returns:
            im (np.ndarray): processed image (np.ndarray)
            im_info (dict): info of processed image
        """
        im = im.astype(np.float32, copy=False)
        if self.is_scale:
            scale = 1.0 / 255.0  # map the uint8 pixel range into [0, 1]
            im *= scale

        if self.norm_type == 'mean_std':
            # Broadcast the (C,) stats to (1, 1, C) so they apply per channel.
            mean = np.array(self.mean)[np.newaxis, np.newaxis, :]
            std = np.array(self.std)[np.newaxis, np.newaxis, :]
            im -= mean
            im /= std
        return im, im_info


class Permute(object):
    """Reorder an image from HWC layout to CHW layout."""

    def __init__(self, ):
        super(Permute, self).__init__()

    def __call__(self, im, im_info):
        """Return a contiguous CHW copy of ``im`` and the unchanged info dict."""
        chw = im.transpose((2, 0, 1)).copy()
        return chw, im_info


class PadStride(object):
    """Zero-pad a CHW image so its height and width are multiples of a stride."""

    def __init__(self, stride=0):
        self.coarsest_stride = stride

    def __call__(self, im, im_info):
        """Pad ``im`` (CHW) to the next stride multiple; no-op if stride <= 0."""
        stride = self.coarsest_stride
        if stride <= 0:
            # No stride constraint configured: pass the image through untouched.
            return im, im_info
        channels, height, width = im.shape
        padded_h = int(np.ceil(float(height) / stride) * stride)
        padded_w = int(np.ceil(float(width) / stride) * stride)
        canvas = np.zeros((channels, padded_h, padded_w), dtype=np.float32)
        canvas[:, :height, :width] = im  # original pixels sit in the top-left corner
        return canvas, im_info


class LetterBoxResize(object):
    def __init__(self, target_size):
        # target_size: final [h, w] after the letterbox resize; an int is
        # expanded to a square [size, size].
        super(LetterBoxResize, self).__init__()
        if isinstance(target_size, int):
            target_size = [target_size, target_size]
        self.target_size = target_size

    def letterbox(self, img, height, width, color=(127.5, 127.5, 127.5)):
        """Resize ``img`` to fit inside (height, width) keeping aspect ratio,
        padding the remainder with ``color`` (gray by default).

        Returns:
            img: the resized-and-padded image.
            ratio: the uniform scale factor that was applied.
            padw, padh: per-side horizontal / vertical padding (may be fractional).
        """
        # letterbox: resize a rectangular image to a padded rectangular
        shape = img.shape[:2]  # original [height, width]
        ratio_h = float(height) / shape[0]
        ratio_w = float(width) / shape[1]
        ratio = min(ratio_h, ratio_w)  # uniform scale so the whole image fits inside the target
        new_shape = (round(shape[1] * ratio),
                     round(shape[0] * ratio))  # scaled (width, height) — cv2.resize expects (w, h)
        padw = (width - new_shape[0]) / 2  # padding per side, horizontal
        padh = (height - new_shape[1]) / 2
        # The -0.1/+0.1 bias makes the rounded top+bottom (left+right) padding
        # sum to exactly the remaining pixels when padh/padw is fractional.
        top, bottom = round(padh - 0.1), round(padh + 0.1)
        left, right = round(padw - 0.1), round(padw + 0.1)

        img = cv2.resize(
            img, new_shape, interpolation=cv2.INTER_AREA)  # resized, no border
        img = cv2.copyMakeBorder(
            img, top, bottom, left, right, cv2.BORDER_CONSTANT,
            value=color)  # padded rectangular
        return img, ratio, padw, padh

    def __call__(self, im, im_info):
        # Apply the letterbox resize and record the scaled shape / ratio.
        assert len(self.target_size) == 2
        assert self.target_size[0] > 0 and self.target_size[1] > 0
        height, width = self.target_size
        h, w = im.shape[:2]  # original size, used to recompute the scaled shape below
        im, ratio, padw, padh = self.letterbox(im, height=height, width=width)

        new_shape = [round(h * ratio), round(w * ratio)]  # content size before padding
        im_info['im_shape'] = np.array(new_shape, dtype=np.float32)
        im_info['scale_factor'] = np.array([ratio, ratio], dtype=np.float32)
        return im, im_info


class Pad(object):
    def __init__(self, size, fill_value=[114.0, 114.0, 114.0]):
        """
        Pad image to a specified size.
        Args:
            size (list[int]): image target size
            fill_value (list[float]): rgb value of pad area, default (114.0, 114.0, 114.0)
        """
        super(Pad, self).__init__()
        if isinstance(size, int):
            size = [size, size]
        self.size = size
        self.fill_value = fill_value

    def __call__(self, im, im_info):
        """Pad ``im`` into the top-left of a fill-colored canvas of self.size."""
        target_h, target_w = self.size
        src_h, src_w = im.shape[:2]

        if (src_h, src_w) == (target_h, target_w):
            # Already the requested size: just guarantee float32 output.
            return im.astype(np.float32), im_info

        # Build a target-sized canvas of the pad color, then drop the original
        # pixels into its top-left corner.
        canvas = np.full((target_h, target_w, 3),
                         self.fill_value, dtype=np.float32)
        canvas[:src_h, :src_w, :] = im.astype(np.float32)
        return canvas, im_info


class Compose:
    """Chain of preprocessing ops (Resize, Permute, ...) built from config dicts."""

    def __init__(self, transforms):  # transforms: list of dicts, each carrying a 'type' key
        self.transforms = []  # instantiated transform objects, applied in order
        for op_info in transforms:
            new_op_info = op_info.copy()  # copy so the caller's config dict is not mutated
            op_type = new_op_info.pop('type')  # transform class name, e.g. 'Resize'
            # NOTE(review): eval() on a config-supplied string — acceptable for
            # the hard-coded configs in this file, unsafe on untrusted input.
            self.transforms.append(eval(op_type)(**new_op_info))

    def __call__(self, raw_image):
        # Decode to RGB plus the initial info dict, then apply each transform.
        image_raw, im_bgr, im_info = decode_image(raw_image)
        print("\n\n\n\n**************************************#####im_info:{}".format(im_info),
              flush=True)  # debug dump of the initial image info

        for t in self.transforms:  # each op returns the updated (image, info) pair
            im_bgr, im_info = t(im_bgr, im_info)
        inputs = copy.deepcopy(im_info)  # deep-copy so later mutation cannot alias im_info
        inputs['image'] = im_bgr  # fully preprocessed image (CHW after Permute)
        inputs['image_raw'] = image_raw  # untouched original frame
        return inputs  # dict combining preprocessed tensors and metadata


class Det_Car_Onnx(object):
    '''
    Vehicle detector backed by an ONNX model.
    Adapted from the PaddleDetection project.
    '''

    def __init__(self, infer_config):
        # infer_config: dict with model path, device, threshold, labels, etc.
        self.infer_config = infer_config
        self.session = None  # onnxruntime session, created by init_onnx_model()

    def init_onnx_model(self):
        """Create the onnxruntime inference session from self.infer_config.

        Leaves self.session as None for unsupported device / encryption
        settings so callers can detect an uninitialized model.
        """
        if self.infer_config["encrypt_way"] == "raw":  # unencrypted model file
            if self.infer_config["device"] == "cpu":
                self.session = onnxruntime.InferenceSession(self.infer_config["model_filename"],
                                                            providers=['CPUExecutionProvider'])
            elif self.infer_config["device"] in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]:
                device_id = self.infer_config["device"]
                self.session = onnxruntime.InferenceSession(self.infer_config["model_filename"],
                                                            providers=['CUDAExecutionProvider'])
                # BUGFIX: device_id was hard-coded to 0 regardless of the
                # configured GPU; use the requested device instead.
                self.session.set_providers(['CUDAExecutionProvider'],
                                           provider_options=[{'device_id': device_id}])
            else:
                self.session = None
        elif self.infer_config["encrypt_way"] == "aes":
            # AES model decryption is not implemented yet.
            self.session = None
        else:
            self.session = None

    def predict_image(self, transforms, raw_image):
        '''
        Detect vehicles and return the recognition results:
        det_result=[
            {"class_name":"vehicle","score":0.75,"car_bbox":[200,300,600,800]}
        ]
        Detected boxes are also drawn onto raw_image in place.
        '''
        inputs = transforms(raw_image)  # preprocess into the model's input format

        # Feed only the tensors the model declares, each with a batch axis added.
        predictor_inputs_name = [var.name for var in self.session.get_inputs()]
        inputs = {k: inputs[k][None,] for k in predictor_inputs_name}
        inputs['image'] = inputs['image'].astype(np.float32)

        outputs = self.session.run(output_names=None, input_feed=inputs)

        # Each output row is [class_id, score, x1, y1, x2, y2].
        bboxes = np.array(outputs[0])
        det_result = []
        for bbox in bboxes:
            # class_id > -1 skips padding rows; score must clear the threshold.
            if bbox[0] > -1 and bbox[1] > self.infer_config["draw_threshold"]:
                print(f"{int(bbox[0])} {bbox[1]} "
                      f"{bbox[2]} {bbox[3]} {bbox[4]} {bbox[5]}")
                class_name = self.infer_config["label_list"][int(bbox[0].tolist())]
                score = bbox[1]
                car_bbox = [int(bbox[2]), int(bbox[3]), int(bbox[4]), int(bbox[5])]
                det_result.append({"class_name": class_name, "score": score, "car_bbox": car_bbox})
                cv2.rectangle(raw_image, (int(bbox[2]), int(bbox[3])), (int(bbox[4]), int(bbox[5])), (0, 0, 255),
                              2)  # draw the detection on the original frame
        print("det_result:{}".format(det_result))
        return det_result


def rec_to_via2(data_dir, local_save=False, save_no_box=False, via_name="via_region_data.json"):
    """Run vehicle detection on every .jpg under data_dir's leaf folders and
    write the boxes out as a VIA2-style annotation JSON per folder.

    Args:
        data_dir: root directory; every leaf sub-directory is processed.
        local_save: if True, write annotations next to the source images;
            otherwise copy annotated images into a sibling "<dir>_two" folder.
        save_no_box: if True, keep entries for images with no detections.
        via_name: currently unused; kept for backward compatibility.
    """
    sub_dirs = filesystem.get_last_dir(data_dir)

    det_car_onnx_inst, transforms = init_model()

    for sub_dir in sub_dirs:
        if not local_save:
            save_dir = sub_dir + "_two"
            # BUGFIX: exist_ok avoids crashing when re-running over the same data.
            os.makedirs(save_dir, exist_ok=True)
        else:
            save_dir = sub_dir

        data_dict = {}
        files = filesystem.get_all_filepath(sub_dir, [".jpg"])
        for file_path in files:
            one_data = {}
            if not osp.exists(file_path): continue

            file_size = osp.getsize(file_path)
            filename = osp.basename(file_path)
            one_data["filename"] = filename
            one_data["size"] = file_size
            one_data["file_attributes"] = {}

            img = cv2.imread(file_path)
            rec_data = det_car_onnx_inst.predict_image(transforms, img)

            if len(rec_data) == 0 and not save_no_box:
                continue

            regions = []
            exist_dict = {}  # de-duplicate boxes that share the same top-left corner
            for item in rec_data:
                x1, y1, x2, y2 = item["car_bbox"]
                label = item["class_name"]
                if exist_dict.get("{}x{}".format(x1,y1), None): continue
                exist_dict["{}x{}".format(x1,y1)] = 1

                region = {
                    "shape_attributes": {
                        "name": "rect",
                        "x": x1,
                        "y": y1,
                        "width": x2-x1,
                        "height": y2-y1
                    },
                    "region_attributes":{
                        "label": label
                    }
                }
                regions.append(region)

            if not local_save:
                shutil.copy(file_path, osp.join(save_dir, filename))

            one_data["regions"] = regions
            # VIA keys each image by "<filename><filesize>".
            data_dict[filename+str(file_size)] = one_data

        with open(osp.join(save_dir, "via_region_data.rec.json"), "w") as rf:
            rf.write(json.dumps(data_dict))

def init_model():
    """Build the preprocessing pipeline and an initialized vehicle detector.

    Returns:
        (det_car_onnx_inst, transforms): ready to call predict_image().
    """
    infer_config = {
        "preprocess_infos": [{'interp': 2, 'keep_ratio': False, 'target_size': [640, 640], 'type': 'Resize'},
                             {'type': 'Permute'}],
        "draw_threshold": 0.5,
        "use_dynamic_shape": False,
        "arch": 'YOLO',
        "min_subgraph_size": 3,
        "Preprocess": [{'interp': 2, 'keep_ratio': False, 'target_size': [640, 640], 'type': 'Resize'},
                       {'type': 'Permute'}],
        "label_list": ['vehicle'],
        "model_filename": osp.join(osp.dirname(osp.abspath(__file__)), "./onnx/my_lpr1.onnx"),
        # Inference device: 'cpu' for CPU, or a GPU index (0, 1, ...).
        "device": "cpu",
        # Model encryption: "raw" = plain file, "aes" = AES (not implemented).
        "encrypt_way": "raw",
    }

    transforms = Compose(infer_config["preprocess_infos"])

    det_car_onnx_inst = Det_Car_Onnx(infer_config)
    det_car_onnx_inst.init_onnx_model()
    return det_car_onnx_inst, transforms

def main():
    """Smoke-test the detector against a hard-coded image path."""
    img_name_list = [r'E:\work\dataset\det\sucai\3.deal\IMG_1769\0\IMG_1769.MOV_117.jpg']

    det_car_onnx_inst, transforms = init_model()

    for img_name in img_name_list:
        raw_image = cv2.imread(img_name)
        if raw_image is None:
            continue  # unreadable or missing file: skip it
        det_car_onnx_inst.predict_image(transforms, raw_image)


if __name__ == '__main__':
    # Batch-annotate a dataset directory in place (writes the VIA JSON next to
    # the images and keeps entries even for images with no detections).
    data_dir = r"E:\work\dataset\det\sucai\2.deal\VID_20230824_102122\1.ok"
    rec_to_via2(data_dir, local_save=True, save_no_box=True)
