
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
import tensorflow as tf
import numpy as np
import cv2, glob
from matplotlib import pyplot as plt
from matplotlib import patches
from Model import makeDarkNet, makeDarkNet53, makeDarkNet2


def visualize_result(imgPath, model, path):
    """Run the detector on one image, draw surviving boxes and save the figure.

    :param imgPath: path to a 512x512 image file (may contain non-ASCII
        characters, hence the np.fromfile + cv2.imdecode route).
    :param model: Keras model mapping [1,512,512,3] -> [1,16,16,5,5+C]
        (YOLO-style head: xy, wh, objectness, class logits per anchor).
    :param path: output directory prefix (must end with a separator); the
        figure is saved there under the input image's base file name.
    """
    GRIDSZ = 16
    # Five anchor (w, h) pairs expressed in grid-cell units.
    ANCHORS = [0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434, 7.88282, 3.52778, 9.77052, 9.16828]
    # cv2.imread cannot handle non-ASCII paths on Windows; decode from a byte
    # buffer instead. Force 3-channel BGR so a PNG with alpha cannot sneak a
    # 4th channel into the pipeline (the old -1/IMREAD_UNCHANGED flag could).
    img = cv2.imdecode(np.fromfile(imgPath, dtype=np.uint8), cv2.IMREAD_COLOR)

    # BGR -> RGB, scale to 0~1.
    img = img[..., ::-1] / 255.
    img = tf.cast(img, dtype=tf.float32)
    # [1,512,512,3]
    img = tf.expand_dims(img, axis=0)
    # [1,16,16,5,5+C]
    y_pred = model(img, training=False)
    print(y_pred.shape)

    # Build the [1,GRIDSZ,GRIDSZ,5,2] tensor of per-cell top-left offsets.
    x_grid = tf.tile(tf.range(GRIDSZ), [GRIDSZ])
    x_grid = tf.reshape(x_grid, (1, GRIDSZ, GRIDSZ, 1, 1))
    x_grid = tf.cast(x_grid, dtype=tf.float32)
    y_grid = tf.transpose(x_grid, (0, 2, 1, 3, 4))
    xy_grid = tf.concat([x_grid, y_grid], axis=-1)
    xy_grid = tf.tile(xy_grid, [1, 1, 1, 5, 1])

    anchors = np.array(ANCHORS, dtype=np.float32).reshape(5, 2)
    # Decode YOLO offsets: sigmoid(xy) is relative to its cell; add the grid
    # offsets and normalize to 0~1 by the grid size.
    pred_xy = tf.sigmoid(y_pred[..., 0:2])
    pred_xy = (pred_xy + xy_grid) / GRIDSZ

    # exp(wh) scales the anchors; normalize to 0~1 as well.
    pred_wh = tf.exp(y_pred[..., 2:4]) * anchors / GRIDSZ

    # [1,16,16,5,1] objectness
    pred_conf = tf.sigmoid(y_pred[..., 4:5])
    # class probabilities
    pred_prob = tf.nn.softmax(y_pred[..., 5:])

    # Drop the batch dimension.
    pred_xy, pred_wh, pred_conf, pred_prob = \
        pred_xy[0], pred_wh[0], pred_conf[0], pred_prob[0]

    # Centre/size -> corner form: [16,16,5,4] = (xmin, ymin, xmax, ymax).
    boxes = tf.concat((pred_xy - 0.5 * pred_wh, pred_xy + 0.5 * pred_wh), axis=-1)
    # Per-class score = objectness * class probability, [16,16,5,C].
    box_score = pred_conf * pred_prob
    # Best class and its score per anchor, [16,16,5].
    box_class = tf.argmax(box_score, axis=-1)
    box_class_score = tf.reduce_max(box_score, axis=-1)

    # Keep confident detections only.
    pred_mask = box_class_score > 0.85
    boxes = tf.boolean_mask(boxes, pred_mask)             # [N,4]
    scores = tf.boolean_mask(box_class_score, pred_mask)  # [N]
    classes = tf.boolean_mask(box_class, pred_mask)       # [N]

    # Back to pixel coordinates of the 512x512 input.
    boxes = boxes * 512.
    # [N] => [n] after non-max suppression.
    select_idx = tf.image.non_max_suppression(boxes, scores, 40, iou_threshold=0.3)
    boxes = tf.gather(boxes, select_idx)
    scores = tf.gather(scores, select_idx)
    classes = tf.gather(classes, select_idx)

    # Plot the detections over the image.
    fig, ax = plt.subplots(1, figsize=(10, 10))
    ax.imshow(img[0])
    n_boxes = boxes.shape[0]
    ax.set_title('boxes:%d' % n_boxes)
    for i in range(n_boxes):
        x1, y1, x2, y2 = boxes[i]
        label = classes[i].numpy()
        # Class 0 ("sugarweet" per the original note) in green, others in red.
        color = (0, 1, 0) if label == 0 else (1, 0, 0)
        rect = patches.Rectangle((x1.numpy(), y1.numpy()),
                                 (x2 - x1).numpy(), (y2 - y1).numpy(),
                                 linewidth=3, edgecolor=color, facecolor='none')
        ax.add_patch(rect)

    # os.path.basename handles the platform's separator; the old manual
    # split on "\\" returned the whole path on non-Windows systems.
    fig.savefig(path + os.path.basename(imgPath))
    # This function runs inside loops over many files: close the figure or
    # matplotlib keeps every one alive and memory grows without bound.
    plt.close(fig)



def getResult(imgPath, model, savePath):
    """Run the detector on one image and return the detections.

    :param imgPath: path to a 512x512 image file readable by cv2.imread.
    :param model: Keras model mapping [1,512,512,3] -> [1,16,16,5,5+C].
    :param savePath: currently unused; kept for interface compatibility.
    :return: list of (x1, y1, x2, y2, score, class) tuples. Coordinates are
        scaled by 2.5 and shifted up by 280px vertically -- presumably mapping
        the 512px model space back to a screen-capture region; TODO confirm
        against the capture code. (The original loop computed these values
        and discarded them; they are now collected and returned.)
    """
    GRIDSZ = 16
    # Five anchor (w, h) pairs expressed in grid-cell units.
    ANCHORS = [0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434, 7.88282, 3.52778, 9.77052, 9.16828]
    # [512,512,3] 0~255, BGR
    img = cv2.imread(imgPath)
    # BGR -> RGB, scale to 0~1.
    img = img[..., ::-1] / 255.
    img = tf.cast(img, dtype=tf.float32)
    # [1,512,512,3]
    img = tf.expand_dims(img, axis=0)
    # [1,16,16,5,5+C]
    y_pred = model(img, training=False)

    # Build the [1,GRIDSZ,GRIDSZ,5,2] tensor of per-cell top-left offsets.
    x_grid = tf.tile(tf.range(GRIDSZ), [GRIDSZ])
    x_grid = tf.reshape(x_grid, (1, GRIDSZ, GRIDSZ, 1, 1))
    x_grid = tf.cast(x_grid, dtype=tf.float32)
    y_grid = tf.transpose(x_grid, (0, 2, 1, 3, 4))
    xy_grid = tf.concat([x_grid, y_grid], axis=-1)
    xy_grid = tf.tile(xy_grid, [1, 1, 1, 5, 1])

    anchors = np.array(ANCHORS, dtype=np.float32).reshape(5, 2)
    # Decode YOLO offsets: sigmoid(xy) relative to the cell + grid offsets,
    # normalized to 0~1 by the grid size.
    pred_xy = tf.sigmoid(y_pred[..., 0:2])
    pred_xy = (pred_xy + xy_grid) / GRIDSZ

    # exp(wh) scales the anchors; normalize to 0~1 as well.
    pred_wh = tf.exp(y_pred[..., 2:4]) * anchors / GRIDSZ

    # [1,16,16,5,1] objectness
    pred_conf = tf.sigmoid(y_pred[..., 4:5])
    # class probabilities
    pred_prob = tf.nn.softmax(y_pred[..., 5:])

    # Drop the batch dimension.
    pred_xy, pred_wh, pred_conf, pred_prob = \
        pred_xy[0], pred_wh[0], pred_conf[0], pred_prob[0]

    # Centre/size -> corner form: [16,16,5,4] = (xmin, ymin, xmax, ymax).
    boxes = tf.concat((pred_xy - 0.5 * pred_wh, pred_xy + 0.5 * pred_wh), axis=-1)
    # Per-class score = objectness * class probability.
    box_score = pred_conf * pred_prob
    box_class = tf.argmax(box_score, axis=-1)
    box_class_score = tf.reduce_max(box_score, axis=-1)

    # Lower threshold than visualize_result (0.45 vs 0.85): keep more
    # candidates for downstream consumption.
    pred_mask = box_class_score > 0.45
    boxes = tf.boolean_mask(boxes, pred_mask)             # [N,4]
    scores = tf.boolean_mask(box_class_score, pred_mask)  # [N]
    classes = tf.boolean_mask(box_class, pred_mask)       # [N]

    # Back to pixel coordinates of the 512x512 input.
    boxes = boxes * 512.
    # [N] => [n] after non-max suppression.
    select_idx = tf.image.non_max_suppression(boxes, scores, 40, iou_threshold=0.3)
    boxes = tf.gather(boxes, select_idx)
    scores = tf.gather(scores, select_idx)
    classes = tf.gather(classes, select_idx)

    results = []
    n_boxes = boxes.shape[0]
    for i in range(n_boxes):
        x1, y1, x2, y2 = boxes[i]
        # Map 512px model coordinates to screen coordinates (scale 2.5,
        # vertical offset -280 -- see docstring note).
        results.append((int(x1 * 2.5), int(y1 * 2.5 - 280.),
                        int(x2 * 2.5), int(y2 * 2.5 - 280.),
                        float(scores[i]), int(classes[i])))

    # Platform-independent base name (the old "\\" split was Windows-only).
    print(os.path.basename(imgPath))
    return results




def compute_IOU(rec1, rec2):
    """Compute the intersection-over-union of two axis-aligned rectangles.

    :param rec1: (x0, y0, x1, y1) where (x0, y0) is the top-left corner and
        (x1, y1) the bottom-right corner. Same layout for rec2.
    :param rec2: (x0, y0, x1, y1)
    :return: IOU as a float in [0.0, 1.0]; 0.0 when the rectangles do not
        overlap (rectangles that merely touch count as no overlap).
    """
    left = max(rec1[0], rec2[0])
    right = min(rec1[2], rec2[2])
    top = max(rec1[1], rec2[1])
    bottom = min(rec1[3], rec2[3])
    # Empty intersection: return float 0.0 (the original returned the int 0,
    # giving the function an inconsistent return type).
    if left >= right or bottom <= top:
        return 0.0
    area1 = (rec1[2] - rec1[0]) * (rec1[3] - rec1[1])
    area2 = (rec2[2] - rec2[0]) * (rec2[3] - rec2[1])
    cross = (right - left) * (bottom - top)
    return cross / (area1 + area2 - cross)



if __name__ == '__main__':

    # model = tf.keras.models.load_model("model")

    # model = makeDarkNet53([1, 2, 3, 3, 2])
    # Build the network architecture from Model.py; the trained weights are
    # loaded separately below via load_weights.
    model = makeDarkNet()
    # model = tf.keras.models.load_model("BestModel_463epoch_0.01467659555375576loss.h5")
    # model = tf.keras.models.load_model("./BestModel")
    # model.load_weights("savedModels\\BestModel_325epoch_0.014266305617869847loss.h5")

    # model = tf.keras.models.load_model("BestDarkNet53")
    # model = tf.keras.models.load_model("model")
    # NOTE(review): _set_inputs is a private Keras API, used here to pin the
    # input signature ([None,512,512,3] float32) before saving. Newer TF
    # releases may remove it -- confirm; model.build(...) or a dummy forward
    # pass is the public alternative.
    model._set_inputs(tf.TensorSpec([None, 512, 512, 3], tf.float32, name='inputs'))
    # model.save("BestDarkNet53")
    # NOTE(review): Windows-style path; this script appears Windows-specific.
    model.load_weights("savedModels\\BestModel_6epoch_0.05851055811159313loss.h5")
    # model.load_weights("savedModels\\newmodel2_darknet18\\BestModel_213epoch_0.027734212316572665loss.h5")
    # Export the architecture + loaded weights as a SavedModel directory.
    model.save("Model_2023")


    # files = glob.glob('data\\serverImg\\*.jpg')
    # for x in files:
    #     visualize_result(x, model, "data\\result2\\")


    # files = glob.glob('data\\zhuaqu\\wz\\*.jpg')
    # # files = glob.glob('data/final2_20211128/*.jpg')
    # # for x in files[:int(0.05 * len(files))]:
    # for x in files:
    #     # y = model.forward(x)
    #     # y = model(x)
    #     # print(y.shape)
    #     visualize_result(x, model, "data\\result4\\wz\\")
    #     # getResult(x, model)
    # # plt.show()
    # # plt.waitforbuttonpress()




    # files = glob.glob('data\\zhuaqu\\dyq\\*.jpg')
    # for x in files:
    #     visualize_result(x, model, "data\\result4\\dyq\\")
    # files = glob.glob('data\\zhuaqu\\pht\\*.jpg')
    # for x in files:
    #     visualize_result(x, model, "data\\result4\\pht\\")
    # files = glob.glob('data\\zhuaqu\\gsd\\*.jpg')
    # for x in files:
    #     visualize_result(x, model, "data\\result4\\gsd\\")
    # files = glob.glob('data\\test2\\*.jpg')
    # for x in files:
    #     visualize_result(x, model, "data\\result\\")


    # base64str =
    # base64str = base64str.replace('+', '-')
    # base64str = base64str.replace('/', '_')
    # img = tf.io.decode_base64(base64str)
    # img = tf.io.decode_jpeg(img)
    #
    # fig, ax = plt.subplots(1, figsize=(10, 10))
    # ax.imshow(img)
    # plt.waitforbuttonpress()
    #

    # Run detection + visualization over every training image; the result
    # figure for each image is written to data\result\<image name>.
    files = glob.glob('data\\train2\\*.jpg')
    for x in files:
        visualize_result(x, model, "data\\result\\")