
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
import tensorflow as tf
from tensorflow import keras
from matplotlib import pyplot as plt
from matplotlib import patches
import numpy as np
import base64
from io import BytesIO
import cv2

class TrashDetector:
    """YOLO-style trash detector wrapping a saved Keras model.

    The model predicts over a 16x16 grid with 5 anchor boxes on a 512x512
    input; decoded boxes are mapped back to the original frame by scaling
    by 2.5 and removing a 280 px vertical letterbox offset (``resAddTensor``).
    """

    def __init__(self, modelPath="./model", modelWeightPath=None):
        """Load the detection model.

        :param modelPath: path of the saved Keras model directory/file
        :param modelWeightPath: optional separate weights file to load on top

        Attributes:
            _model: the tensorflow/keras model object
            _modelUsing: naive busy flag so two callers do not use one
                detector at once (NOTE(review): not a real lock — use
                threading.Lock if genuine thread-safety is required)
        """
        self._modelUsing = True
        self.GRIDSZ = 16
        # Five anchor (w, h) pairs in grid units, flattened.
        self.ANCHORS = [0.57273, 0.677385, 1.87446, 2.06253, 3.33843,
                        5.47434, 7.88282, 3.52778, 9.77052, 9.16828]
        try:
            self._model = keras.models.load_model(modelPath)
            # BUG FIX: original used `not modelWeightPath == None`.
            if modelWeightPath is not None:
                self._model.load_weights(modelWeightPath)
            # Undo the vertical letterbox padding added by _resizeImg:
            # 112 px at 512 scale -> 280 px at full scale, on y1 and y2.
            self.resAddTensor = np.array([0., -280., 0., -280.], dtype=np.float32)
        finally:
            # Original set the flag in both `except` (then re-raised) and
            # `else`; a single `finally` covers both paths identically.
            self._modelUsing = False

    def detectByBase64(self, imgBase64code: str, codeUrlSafe: bool = False,
                       imgFormat: str = "jpg", cropPoint=None):
        """Detect trash in a base64-encoded image.

        :param imgBase64code: the image as a base64 string
        :param codeUrlSafe: True if the string uses the URL-safe base64 alphabet
        :param imgFormat: image format, "jpg" or "png"
        :param cropPoint: optional [xb, yb, xe, ye]; detections outside this
            region are dropped and the returned image is cropped to it
        :return: (classNames, numDetections, boxLocations, annotatedImage),
            or None when the detector is already busy
        """
        if self._modelUsing:
            return None
        self._modelUsing = True
        try:
            img = self._base64CodeToTensor(imgBase64code, codeUrlSafe, imgFormat)
            # Keep the original-resolution frame for drawing rectangles on.
            primerImg = img.numpy()
            predict = self._model(self._resizeImg(img), training=False)
        finally:
            self._modelUsing = False
        return self._annotateAndCrop(primerImg, predict, cropPoint)

    def detectByTFTensor(self, tfTensor, imgFormat: str = "jpg", cropPoint=None):
        """Detect trash in an image already held as a [1, H, W, 3] tf tensor.

        :param tfTensor: a tensorflow image tensor with a leading batch axis
        :param imgFormat: unused; kept for interface compatibility
        :param cropPoint: optional [xb, yb, xe, ye] region of interest
        :return: (classNames, numDetections, boxLocations, annotatedImage),
            or None when the detector is already busy
        """
        if self._modelUsing:
            return None
        self._modelUsing = True
        try:
            # Drop the leading batch dimension to get an H x W x 3 frame.
            primerImg = tf.reshape(
                tfTensor,
                [tfTensor.shape[1], tfTensor.shape[2], tfTensor.shape[3]]).numpy()
            # NOTE(review): _resizeImg reads shape[0]/shape[1] as H/W, which is
            # off-by-one-axis for this 4-D input (shape[0] is the batch axis);
            # it still falls into the resize branch — confirm intended behavior.
            predict = self._model(self._resizeImg(tfTensor), training=False)
        finally:
            self._modelUsing = False
        return self._annotateAndCrop(primerImg, predict, cropPoint)

    def _annotateAndCrop(self, primerImg, predict, cropPoint):
        """Decode predictions, draw boxes, and optionally crop to a region.

        Shared tail of detectByBase64/detectByTFTensor (the original
        duplicated this logic in both methods; a stray debug print of the
        cropped shape was removed).

        :param primerImg: H x W x 3 numpy frame (BGR) to draw on
        :param predict: raw model output
        :param cropPoint: optional [xb, yb, xe, ye] region of interest
        :return: (classNames, numDetections, boxLocations, annotatedImage)
        """
        rClassName, rNumDetect, rLocation = self._handlePrecict(predict=predict)

        if cropPoint is not None:
            xb, yb, xe, ye = cropPoint

        rmIdxList = []
        for i in range(rNumDetect):
            # Green for class 0 ("trash1"), red for the other class.
            color = (0, 255, 0) if rClassName[i] == 0 else (255, 0, 0)
            x1, y1, x2, y2 = rLocation[i]

            # Skip (and remember) boxes not fully inside the crop region.
            if cropPoint is not None and not (xb <= x1 <= xe and xb <= x2 <= xe
                                              and yb <= y1 <= ye and yb <= y2 <= ye):
                rmIdxList.append(i)
                continue

            primerImg = cv2.rectangle(primerImg, (int(x1), int(y1)),
                                      (int(x2), int(y2)), color, 2)

        primerImg = cv2.cvtColor(primerImg, cv2.COLOR_BGR2RGB)

        if cropPoint is not None:
            # Crop to the region, then scale back up to the original size.
            primerX, primerY, _ = primerImg.shape
            primerImg = primerImg[int(yb):int(ye), int(xb):int(xe), :]
            primerImg = cv2.resize(primerImg, (primerY, primerX))
            # Remove the out-of-region detections; rmIdxList is ascending,
            # so subtract the number of elements already deleted.
            for nRemoved, idx in enumerate(rmIdxList):
                del rClassName[idx - nRemoved]
                del rLocation[idx - nRemoved]
            rNumDetect -= len(rmIdxList)

        return rClassName, rNumDetect, rLocation, primerImg

    def _handlePrecict(self, predict):
        """Decode raw model output into class ids and pixel-space boxes.

        :param predict: raw output shaped [1, 16, 16, 5, 7]
            (x, y, w, h, objectness, 2 class logits) — assumed from the
            slicing below; confirm against the model definition.
        :return: (classIds, numBoxes, boxes) with boxes as
            [x1, y1, x2, y2] lists in original-frame pixel coordinates
        """
        GRIDSZ = self.GRIDSZ
        # Build the [1, GRIDSZ, GRIDSZ, 5, 2] grid of cell coordinates.
        x_grid = tf.tile(tf.range(GRIDSZ), [GRIDSZ])
        x_grid = tf.reshape(x_grid, (1, GRIDSZ, GRIDSZ, 1, 1))
        x_grid = tf.cast(x_grid, dtype=tf.float32)
        y_grid = tf.transpose(x_grid, (0, 2, 1, 3, 4))
        xy_grid = tf.tile(tf.concat([x_grid, y_grid], axis=-1), [1, 1, 1, 5, 1])

        anchors = np.array(self.ANCHORS).reshape(5, 2)
        # Centres: sigmoid offset within each cell, normalized to 0~1.
        pred_xy = (tf.sigmoid(predict[..., 0:2]) + xy_grid) / tf.constant([16., 16.])
        # Sizes: exponential scale on the anchor priors, normalized to 0~1.
        pred_wh = tf.exp(predict[..., 2:4]) * anchors / tf.constant([16., 16.])
        pred_conf = tf.sigmoid(predict[..., 4:5])    # [1,16,16,5,1] objectness
        pred_prob = tf.nn.softmax(predict[..., 5:])  # class probabilities

        pred_xy, pred_wh, pred_conf, pred_prob = \
            pred_xy[0], pred_wh[0], pred_conf[0], pred_prob[0]

        # [16,16,5,4] corner-form boxes.
        boxes = tf.concat((pred_xy - 0.5 * pred_wh, pred_xy + 0.5 * pred_wh), axis=-1)
        box_score = pred_conf * pred_prob
        box_class = tf.argmax(box_score, axis=-1)
        box_class_score = tf.reduce_max(box_score, axis=-1)
        # Confidence threshold; [16,16,5,...] -> [N,...].
        pred_mask = box_class_score > 0.85
        boxes = tf.boolean_mask(boxes, pred_mask) * 512.  # to 512x512 pixels
        scores = tf.boolean_mask(box_class_score, pred_mask)
        classes = tf.boolean_mask(box_class, pred_mask)

        # Non-max suppression: [N] -> [n] surviving boxes.
        select_idx = tf.image.non_max_suppression(boxes, scores, 40, iou_threshold=0.3)
        boxes = tf.gather(boxes, select_idx)
        classes = tf.gather(classes, select_idx)

        # Map from the 512x512 model frame back to the source frame; the
        # broadcast add replaces the original per-row Python loop.
        boxes = boxes.numpy() * 2.5 + self.resAddTensor

        return classes.numpy().tolist(), boxes.shape[0], boxes.tolist()

    def _base64CodeToTensor(self, imgBase64code: str, codeUrlSafe: bool, imgFormat: str):
        """Decode a base64 string into an H x W x 3 uint8 image tensor.

        :param imgBase64code: the image as a base64 string
        :param codeUrlSafe: True if the string already uses the URL-safe alphabet
        :param imgFormat: "jpg", "png", or anything else for auto-detection
        :return: an H x W x 3 tensorflow image tensor
        """
        if not codeUrlSafe:
            # tf.io.decode_base64 expects the URL-safe alphabet; translate.
            imgBase64code = imgBase64code.replace('+', '-').replace('/', '_')

        raw = tf.io.decode_base64(imgBase64code)
        if imgFormat == "jpg":
            return tf.io.decode_jpeg(raw, channels=3)
        if imgFormat == "png":
            return tf.io.decode_png(raw, channels=3)
        return tf.io.decode_image(raw, channels=3)

    def _resizeImg(self, img):
        """Resize an image tensor to the [1, 512, 512, 3] model input shape.

        :param img: an image tensor (H x W x 3 expected)
        :return: a float32 [1, 512, 512, 3] tensor
        """
        # NOTE(review): for an H x W x 3 tensor, shape[0] is height and
        # shape[1] is width, so these names look swapped — kept as-is.
        imgWidth = img.shape[1]
        imgHigh = img.shape[0]
        if imgWidth != 512 or imgHigh != 512:
            if imgWidth != 1280 or imgHigh != 720:
                # NOTE(review): tf.image.resize takes size as (height, width);
                # (1280, 720) yields a 1280-tall, 720-wide image. (720, 1280)
                # was probably intended — confirm against the training
                # pipeline before changing, since resAddTensor depends on it.
                img = tf.image.resize(img, (1280, 720), antialias=True)

            # Letterbox into 512x512 (pads 112 px top/bottom for 1280x720).
            img = tf.image.resize_with_pad(img, 512, 512, antialias=True)
            # NOTE(review): /255 scaling only happens on this branch; a
            # native 512x512 input skips it — confirm this is intended.
            img /= 255.

        img = tf.image.convert_image_dtype(img, tf.float32)
        return tf.reshape(img, (1, 512, 512, 3))

    def _imageToBase64(self, image_np):
        """Encode a numpy image as a plain base64 JPEG string.

        :param image_np: an image as a numpy array (BGR, OpenCV layout)
        :return: the JPEG bytes as a base64 str
        """
        jpegBuf = cv2.imencode('.jpg', image_np)[1]
        # .decode('ascii') replaces the original str(...)[2:-1] hack; base64
        # output is pure ASCII so the result is byte-identical.
        return base64.b64encode(jpegBuf).decode('ascii')


def func(img):
    """Judge whether *img* is bright enough (and not over-exposed) to detect.

    :param img: a BGR image as a numpy array (OpenCV layout)
    :return: -1 when the image is judged too dark or too light to detect
        reliably, else 0 (the original fell through returning None; 0 still
        satisfies the caller's `is_ != -1` check)
    """
    gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    piexs_sum = gray_img.size  # total pixel count (rows * cols)

    # Vectorized counts replace the original per-pixel Python double loop.
    dark_sum = int(np.count_nonzero(gray_img < 40))    # gray 0~39 counts as dark
    light_sum = int(np.count_nonzero(gray_img > 180))  # gray 181~255 counts as light

    dark_prop = dark_sum / piexs_sum
    light_prop = light_sum / piexs_sum

    # BUG FIX: the original tested `dark_prop >= 1.0` although its own comment
    # documents a 0.78 threshold, and `light_prop >= 0.0`, which is true for
    # every image — so *all* images were rejected as "light".
    if dark_prop >= 0.78:
        print("The image is dark!")
        return -1
    # TODO(review): 0.78 mirrors the documented dark threshold; no value was
    # ever specified for the light side — tune as needed.
    if light_prop >= 0.78:
        print("The image is light!")
        return -1
    return 0



def dection_main(lt, rt, lb, rb, image_base64):
    """Detect trash in a base64-encoded frame, optionally limited to a region.

    Args:
        lt: top-left point, e.g. [217.6, 108]
        rt: top-right point, e.g. [1113.6, 108]
        lb: bottom-left point, e.g. [217.6, 554.4]
        rb: bottom-right point, e.g. [1113.6, 554.4]
        image_base64: the image encoded as a base64 string

    Returns:
        (class_name, num_detect, location, annotated_image); num_detect is -1
        and annotated_image is None when the frame fails the exposure check.
    """
    global num_detect  # kept: the original published the count at module level
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # Suppress TensorFlow logging (1)
    tf.get_logger().setLevel('ERROR')         # Suppress TensorFlow logging (2)

    # Enable GPU dynamic memory allocation.
    for gpu in tf.config.experimental.list_physical_devices('GPU'):
        tf.config.experimental.set_memory_growth(gpu, True)

    # Build the detector once and cache it at module level.
    if "DETECTOR" not in globals():
        globals()["DETECTOR"] = TrashDetector(modelPath="model2")

    # Exposure check (func) disabled: the base64 -> array decoding it relied
    # on was commented out upstream, so every frame is treated as detectable.
    is_ = 0

    # BUG FIX: the original combined the lengths with bitwise `&`, which can
    # evaluate to 0 for non-empty lists (e.g. 1 & 2 == 0); use truthiness.
    if lt and rt and rb and lb and is_ != -1:
        # Corner points given and exposure OK: detect inside the crop region.
        cropPoint = [int(lt[0]), int(lt[1]), int(rb[0]), int(rb[1])]
        class_name, num_detect, location, image_np_with_detections = \
            globals()["DETECTOR"].detectByBase64(image_base64, cropPoint=cropPoint)
        return class_name, num_detect, location, image_np_with_detections
    elif is_ != -1:
        # No corner points, but exposure OK: detect on the full frame.
        class_name, num_detect, location, image_np_with_detections = \
            globals()["DETECTOR"].detectByBase64(image_base64, cropPoint=None)
        return class_name, num_detect, location, image_np_with_detections
    else:
        print('图片曝光！不检测')  # over/under-exposed: skip detection
        # BUG FIX: the original assigned the undefined name `img_data` to a
        # misspelled variable here, guaranteeing a NameError on this path.
        num_detect = -1
        return [], num_detect, [], None



if __name__ == '__main__':
    # Quick environment sanity check for the GPU stack.
    import torch
    # BUG FIX: `print(torch.version)` printed the module object
    # (<module 'torch.version' ...>), not the version string.
    print(torch.__version__)
    print(torch.cuda.is_available())