import paddle.fluid as fluid
import paddle
import os
import numpy as np
import cv2
from add_mosaic.pyramidbox import PyramidBox

# os.environ['FLAGS_fraction_of_gpu_memory_to_use']='0.2'

class LicensePlateDetection(object):
    """License-plate detector backed by a PaddlePaddle (fluid) inference model.

    Loads an exported inference model from ``../model_data/license_plate/``
    (relative to this file) and exposes helpers to run detection on a single
    BGR image or on a video stream.
    """

    def __init__(self):
        self.work_dir = os.path.dirname(os.path.abspath(__file__))
        self.model_dir = os.path.join(self.work_dir, '../model_data/license_plate/')
        # Class-id -> COCO category-id map without a background class.
        self.clsid2catid, self.catid2name = coco17_category_info(False)
        startup_prog = fluid.Program()
        # Prefer the first GPU when this paddle build was compiled with CUDA.
        if paddle.device.is_compiled_with_cuda():
            place = fluid.CUDAPlace(0)
        else:
            place = fluid.CPUPlace()
        # Keep executor/program state on the instance instead of module-level
        # globals: avoids cross-instance clobbering and accidental leakage.
        self.exe = fluid.Executor(place)
        self.exe.run(startup_prog)
        [self.inference_program, self.feed_target_names, self.fetch_targets] = (
            fluid.io.load_inference_model(
                dirname=self.model_dir, executor=self.exe,
                model_filename='__model_infer__', params_filename='__params__'))

    def detection_license_plate(self, image):
        """Detect plates in a BGR ``image``.

        Returns ``'LicensePlate xmin ymin xmax ymax ... '`` (one coordinate
        quadruple per detection, trailing space preserved) or ``''`` when
        nothing was found.
        """
        bboxes = self.predict_directly(image, self.clsid2catid)
        # `not bboxes` also guards against a None result from predict_directly,
        # which the previous len() check would have crashed on.
        if not bboxes:
            return ""
        parts = ['LicensePlate ']
        for det in bboxes:
            # det["bbox"] is [xmin, ymin, width, height] (COCO xywh).
            xmin, ymin, w, h = det["bbox"]
            xmax, ymax = xmin + w, ymin + h
            parts.append(f'{int(xmin)} {int(ymin)} {int(xmax)} {int(ymax)} ')
        return ''.join(parts)

    def detect_plate_number_image_video(self):
        """Debug viewer: run detection over a hard-coded video and draw boxes.

        Press 'q' in the display window to quit; also stops cleanly when the
        stream ends (the previous version crashed on a failed read and never
        released the capture).
        """
        video_capture = cv2.VideoCapture('E:/Video/天府五街.mp4')
        try:
            while True:
                ret, frame = video_capture.read()
                if not ret:
                    # End of stream or read failure: stop instead of passing
                    # None into the detector.
                    break
                # Model expects RGB; OpenCV delivers BGR.
                bboxes = self.predict_directly(frame[..., ::-1], self.clsid2catid)
                if bboxes:
                    for det in bboxes:
                        xmin, ymin = det["bbox"][0], det["bbox"][1]
                        xmax = det["bbox"][0] + det["bbox"][2]
                        ymax = det["bbox"][1] + det["bbox"][3]
                        cv2.rectangle(frame, (int(xmin), int(ymin)),
                                      (int(xmax), int(ymax)), (255, 0, 0), 2)
                cv2.imshow('', frame)
                # Press Q to stop!
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break
        finally:
            video_capture.release()
            cv2.destroyAllWindows()

    # dont crop input image, infer directly
    def predict_directly(self, image_BGR, clsid2catid):
        """Run the network on a whole BGR image.

        Returns the list of COCO-style detection dicts produced by
        ``bbox2out`` (may be empty).
        """
        test_img, img_shape = self.Prepocess_Decode(image_BGR)
        # The model consumes the original (h, w) as an int32 tensor of
        # shape (1, 2).
        img_shape = np.array(img_shape, dtype='int32').reshape(1, 2)
        keys = ["bbox", "im_id"]
        outs = self.exe.run(self.inference_program,
                            feed={self.feed_target_names[0]: test_img,
                                  self.feed_target_names[1]: img_shape},
                            fetch_list=self.fetch_targets,
                            return_numpy=False)
        # Pair each LoD tensor with its sequence lengths for bbox2out.
        res = {
            k: (np.array(v), v.recursive_sequence_lengths())
            for k, v in zip(keys, outs)
        }
        bbox_results = None
        is_bbox_normalized = False
        if "bbox" in res:
            # output bboxes with label is 6
            bbox_results = bbox2out([res], clsid2catid, is_bbox_normalized)
        return bbox_results

    def Prepocess_Decode(self, img_bgr):
        """Preprocess a decoded image array for inference.

        Resizes, normalizes and transposes to CHW, then adds a batch axis.
        Returns ``(test_img, (h, w))`` where (h, w) is the ORIGINAL image size.
        """
        # Removed: a no-op try/except around a plain assignment, and a debug
        # np.save('infer_pre.npy', ...) that wrote a file on every call.
        img_shape = img_bgr.shape[:2]
        test_img = ResizeImage(img_bgr)
        test_img = NormalizeImage(test_img)
        test_img = Permute(test_img)
        test_img = test_img[np.newaxis, :]  # add batch dim -> (1, 3, H, W)
        return test_img, img_shape


def coco17_category_info(with_background=True):
    """
    Build the class-id -> category-id map and the category-id -> name map
    for the COCO2017 dataset.

    Args:
        with_background (bool, default True):
            whether class 0 is reserved for the background.
    """
    catid2name = {
        0: 'background',
        1: 'person',
        2: 'bicycle',
        3: 'car',
        4: 'motorcycle',
        5: 'airplane',
        6: 'bus',
        7: 'train',
        8: 'truck',
        9: 'boat',
        10: 'traffic light',
        11: 'fire hydrant',
        13: 'stop sign',
        14: 'parking meter',
        15: 'bench',
        16: 'bird',
        17: 'cat',
        18: 'dog',
        19: 'horse',
        20: 'sheep',
        21: 'cow',
        22: 'elephant',
        23: 'bear',
        24: 'zebra',
        25: 'giraffe',
        27: 'backpack',
        28: 'umbrella',
        31: 'handbag',
        32: 'tie',
        33: 'suitcase',
        34: 'frisbee',
        35: 'skis',
        36: 'snowboard',
        37: 'sports ball',
        38: 'kite',
        39: 'baseball bat',
        40: 'baseball glove',
        41: 'skateboard',
        42: 'surfboard',
        43: 'tennis racket',
        44: 'bottle',
        46: 'wine glass',
        47: 'cup',
        48: 'fork',
        49: 'knife',
        50: 'spoon',
        51: 'bowl',
        52: 'banana',
        53: 'apple',
        54: 'sandwich',
        55: 'orange',
        56: 'broccoli',
        57: 'carrot',
        58: 'hot dog',
        59: 'pizza',
        60: 'donut',
        61: 'cake',
        62: 'chair',
        63: 'couch',
        64: 'potted plant',
        65: 'bed',
        67: 'dining table',
        70: 'toilet',
        72: 'tv',
        73: 'laptop',
        74: 'mouse',
        75: 'remote',
        76: 'keyboard',
        77: 'cell phone',
        78: 'microwave',
        79: 'oven',
        80: 'toaster',
        81: 'sink',
        82: 'refrigerator',
        84: 'book',
        85: 'clock',
        86: 'vase',
        87: 'scissors',
        88: 'teddy bear',
        89: 'hair drier',
        90: 'toothbrush'
    }

    # The 80 real categories (everything but background id 0), in ascending
    # catid order, map to class ids 1..80 — or 0..79 when the background
    # class is not counted.
    first_clsid = 1 if with_background else 0
    real_catids = sorted(cid for cid in catid2name if cid != 0)
    clsid2catid = {
        clsid: catid
        for clsid, catid in enumerate(real_catids, start=first_clsid)
    }

    return clsid2catid, catid2name


def bbox2out(results, clsid2catid, is_bbox_normalized=False):
    """Convert raw detector output into COCO-style xywh detection dicts.

    Args:
        results: list of dicts, each with `bbox` (tensor data, lod lengths)
                 and `im_id`; if is_bbox_normalized=True, also `im_shape`.
        clsid2catid: class id to category id map of COCO2017 dataset.
        is_bbox_normalized: whether bbox coords are normalized to [0, 1].

    Returns:
        list of ``{'category_id', 'bbox', 'score'}`` dicts. Only detections
        whose mapped category id is 6 are kept.
        NOTE(review): 6 is 'bus' in the COCO map above — presumably this
        custom model reuses that id for license plates; confirm with the
        model's label file.
    """
    xywh_res = []
    for t in results:
        bboxes = t['bbox'][0]
        lengths = t['bbox'][1][0]
        # The None check must come FIRST: None has no .shape attribute, so
        # the previous ordering raised AttributeError instead of skipping.
        if bboxes is None or bboxes.shape == (1, 1):
            continue

        k = 0  # running index into the flat bboxes array
        for i in range(len(lengths)):
            num = lengths[i]  # detections belonging to image i

            for j in range(num):
                dt = bboxes[k]
                clsid, score, xmin, ymin, xmax, ymax = dt.tolist()
                catid = clsid2catid[int(clsid)]
                if catid == 6:
                    if is_bbox_normalized:
                        xmin, ymin, xmax, ymax = \
                            clip_bbox([xmin, ymin, xmax, ymax])
                        w = xmax - xmin
                        h = ymax - ymin
                        # Scale normalized coords back to pixel units.
                        im_height, im_width = t['im_shape'][0][i].tolist()
                        xmin *= im_width
                        ymin *= im_height
                        w *= im_width
                        h *= im_height
                    else:
                        # +1: inclusive pixel coordinates.
                        w = xmax - xmin + 1
                        h = ymax - ymin + 1

                    bbox = [xmin, ymin, w, h]
                    coco_res = {
                        'category_id': catid,
                        'bbox': bbox,
                        'score': score
                    }
                    xywh_res.append(coco_res)
                k += 1
    return xywh_res


def clip_bbox(bbox):
    """Clamp the first four coordinates of *bbox* into [0.0, 1.0]."""
    xmin, ymin, xmax, ymax = (min(max(coord, 0.), 1.) for coord in bbox[:4])
    return xmin, ymin, xmax, ymax


def ResizeImage(im, target_size=576, max_size=0):
    """Resize an HWC image for the detector.

    When ``max_size`` is 0 (the default) the image is stretched to exactly
    ``target_size`` x ``target_size``; otherwise the short side is scaled to
    ``target_size`` while capping the long side at ``max_size`` (keeping
    aspect ratio).

    Raises:
        ValueError: if ``im`` is not 3-dimensional.
        ZeroDivisionError: if the smaller image dimension is 0.
    """
    if len(im.shape) != 3:
        raise ValueError('image is not 3-dimensional.')
    im_shape = im.shape
    im_size_min = np.min(im_shape[0:2])
    im_size_max = np.max(im_shape[0:2])
    if float(im_size_min) == 0:
        raise ZeroDivisionError('min size of image is 0')
    if max_size != 0:
        im_scale = float(target_size) / float(im_size_min)
        # Prevent the biggest axis from being more than max_size.
        if np.round(im_scale * im_size_max) > max_size:
            im_scale = float(max_size) / float(im_size_max)
        im_scale_x = im_scale
        im_scale_y = im_scale
    else:
        # Independent per-axis scales: squash to a fixed square.
        im_scale_x = float(target_size) / float(im_shape[1])
        im_scale_y = float(target_size) / float(im_shape[0])

    im = cv2.resize(
        im,
        None,
        None,
        fx=im_scale_x,
        fy=im_scale_y,
        # Named flag instead of the magic number 2 (same value).
        interpolation=cv2.INTER_CUBIC)
    return im


def NormalizeImage(im, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), is_scale=True):
    """Normalize an HWC image (ImageNet statistics by default).

    Operators:
        1. (optional) Scale the image to [0, 1].
        2. Each pixel minus mean and divided by std.

    The input array is never modified; a new float32 array is returned.
    (Defaults are tuples, not lists, to avoid the mutable-default pitfall;
    the old ``astype(copy=False)`` aliased the caller's float32 array and
    the in-place ops then mutated it when ``is_scale=False``.)
    """
    # copy=True (the astype default) guarantees the in-place ops below
    # never touch the caller's array.
    im = im.astype(np.float32)
    mean = np.array(mean)[np.newaxis, np.newaxis, :]
    std = np.array(std)[np.newaxis, np.newaxis, :]
    if is_scale:
        im /= 255.0
    im -= mean
    im /= std
    return im


def Permute(im, channel_first=True, to_bgr=False):
    """Reorder an HWC image to CHW and optionally swap the RGB channels.

    Args:
        im: 3-D image array, layout (H, W, C).
        channel_first: move channels to the front, (H, W, C) -> (C, H, W).
        to_bgr: after the transpose, reverse the 3 channels (RGB <-> BGR).
    """
    if channel_first:
        # Single transpose is equivalent to the two successive swapaxes.
        im = np.transpose(im, (2, 0, 1))
    if to_bgr:
        im = im[[2, 1, 0], :, :]
    return im


if __name__ == '__main__':
    # Manual smoke test: build the detector (loads the inference model from
    # ../model_data/license_plate/) and run it over the hard-coded video,
    # drawing detected boxes in an OpenCV window.
    licensePlateDetection = LicensePlateDetection()
    licensePlateDetection.detect_plate_number_image_video()

    # addMosaic.detect_face_image(addMosaic.Prepocess_Decode("E:/AI/人车图片/多媒体/110102/行政区域/大栅栏街道12.jpg"))
