# coding=utf-8

import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

import cv2
import numpy as np
from pathlib import Path
from tensorflow import keras
import tensorflow as tf
import cv2
import logging
import pic_data
import matplotlib.pyplot as pyplot
import datetime
def getTime_ms():
    """Return the number of milliseconds elapsed since local midnight.

    Takes a single ``datetime.now()`` snapshot so hour/minute/second/
    microsecond are mutually consistent — the original called ``now()``
    four separate times, which could straddle a second boundary and
    produce an inconsistent result.
    """
    now = datetime.datetime.now()
    return (now.hour * 3600 + now.minute * 60 + now.second) * 1000 \
           + now.microsecond / 1000

def draw_img(boxes, img):
    """Draw one detection box (center-xywh) and a fixed green reference
    rectangle on *img*, then display the result for up to 10 seconds."""
    cx, cy, bw, bh = boxes
    half_w = bw / 2
    half_h = bh / 2
    left, right = int(cx - half_w), int(cx + half_w)
    top, bottom = int(cy - half_h), int(cy + half_h)
    print("rectangle" , left , top , right , bottom)
    # detection box (cyan, thickness 1)
    cv2.rectangle(img, (left, top), (right, bottom), (255, 255, 0), 1)
    # hard-coded reference frame (green)
    cv2.rectangle(img , (16,28),(306,230),(0,255,0),1)
    #cv2.rectangle(img , (44,16),(278,236),(0,255,0),2)
    print("draw")
    cv2.imshow('results', img)
    cv2.waitKey(10000)
    cv2.destroyWindow("results")

def yolo_decode(prediction, anchors, num_classes, input_dims, scale_x_y=None, use_softmax=False):
    """Decode raw YOLO head features into bounding-box parameters.

    Parameters
    ----------
    prediction : array, shape (1, gh, gw, num_anchors * (num_classes + 5))
    anchors : list of [w, h] anchor sizes in input-image pixels
    num_classes : int
    input_dims : (height, width) of the network input
    scale_x_y : unused; kept for interface compatibility
    use_softmax : normalize class scores with a per-box softmax instead of sigmoid

    Returns
    -------
    (boxes, objectness, class_scores): boxes is (N, 4) center-xywh in
    coordinates relative to the input size (0..1); objectness is (N, 1);
    class_scores is (N, num_classes).
    """
    num_anchors = len(anchors)
    grid_size = prediction.shape[1:3]  # (gh, gw) grid cells
    num_boxes = grid_size[0] * grid_size[1] * num_anchors
    # flatten to one row per predicted box: cell-major, anchor innermost
    prediction = np.reshape(prediction, (num_boxes, num_classes + 5))

    # (x, y) cell offsets, one copy per anchor.  The original repeated by
    # grid_size[0] and iterated j over grid_size[0], which is only correct
    # when num_anchors == grid width == grid height.
    x_y_offset = [[[j, i]] * num_anchors
                  for i in range(grid_size[0]) for j in range(grid_size[1])]
    x_y_offset = np.array(x_y_offset).reshape(num_boxes, 2)

    # sigmoid(tx, ty) + cell offset, normalized to 0..1
    x_y_tmp = 1 / (1 + np.exp(-prediction[..., :2]))
    box_xy = (x_y_tmp + x_y_offset) / np.array(grid_size)[::-1]

    # Log-space transform of width/height: exp(tw, th) * anchor size,
    # normalized by the input dimensions (list repetition tiles the anchor
    # pattern in the same anchor-innermost order as the flattened rows).
    anchors2 = np.array(anchors*(grid_size[0] * grid_size[1]))
    box_wh = (np.exp(prediction[..., 2:4]) * anchors2) / np.array(input_dims)[::-1]

    # objectness via sigmoid
    objectness = 1 / (1 + np.exp(-prediction[..., 4:5]))

    if use_softmax:
        # Per-box softmax (stabilized by subtracting the row max).  The
        # original summed exp over ALL boxes at once, normalizing across
        # the whole grid instead of each box's class vector.
        exp_cls = np.exp(prediction[..., 5:] - np.max(prediction[..., 5:], axis=-1, keepdims=True))
        class_scores = exp_cls / np.sum(exp_cls, axis=-1, keepdims=True)
    else:
        class_scores = 1 / (1 + np.exp(-prediction[..., 5:]))
    return np.concatenate((box_xy, box_wh), axis=-1), objectness, class_scores

def box_iou(boxes):
    """IoU of the first box against every other box in the array.

    Parameters
    ----------
    boxes : numpy array, shape (N, 4), center-xywh format

    Returns
    -------
    numpy array, shape (N-1,): IoU of boxes[1:] with boxes[0]
    """
    half = boxes[:, 2:4] / 2
    mins = boxes[:, 0:2] - half    # (left, top) corners
    maxes = boxes[:, 0:2] + half   # (right, bottom) corners
    areas = boxes[:, 2] * boxes[:, 3]

    # intersection rectangle of box 0 with each remaining box
    inter_min = np.maximum(mins[1:], mins[0])
    inter_max = np.minimum(maxes[1:], maxes[0])

    # +1 keeps the inclusive pixel-length convention (x=3..5 spans three
    # pixels: 5-3+1), matching non_max_suppress in this file
    inter_wh = np.maximum(0., inter_max - inter_min + 1)
    inter_area = inter_wh[:, 0] * inter_wh[:, 1]

    return inter_area / (areas[0] + areas[1:] - inter_area)

def nms_boxes(boxes, classes, scores, iou_threshold=0.4, confidence=0.1,
              use_diou=True, is_soft=False, use_exp=False, sigma=0.5):
    """Hard non-max suppression over center-xywh boxes.

    Repeatedly keeps the highest-scoring box and discards every remaining
    box whose IoU with it exceeds *iou_threshold*.

    NOTE(review): ``confidence``, ``use_diou``, ``is_soft``, ``use_exp``
    and ``sigma`` are currently unused; kept for interface compatibility.

    Returns (boxes, classes, scores) numpy arrays of the kept entries.
    """
    import copy
    nboxes, nclasses, nscores = [], [], []

    # work on deep copies so the caller's arrays are never mutated
    b_nms = copy.deepcopy(boxes)
    c_nms = copy.deepcopy(classes)
    s_nms = copy.deepcopy(scores)

    while len(s_nms) > 0:
        # pick and record the current best-scoring box
        i = np.argmax(s_nms)
        nboxes.append(copy.deepcopy(b_nms[i]))
        nclasses.append(copy.deepcopy(c_nms[i]))
        nscores.append(copy.deepcopy(s_nms[i]))

        # Move the best entry to row 0 (box_iou compares row 0 against the
        # rest).  The original forgot to swap c_nms alongside b_nms/s_nms,
        # letting class labels drift out of alignment with their boxes
        # whenever argmax != 0 (harmless only in the single-class case).
        b_nms[[i, 0], ...] = b_nms[[0, i], ...]
        c_nms[i], c_nms[0] = c_nms[0], c_nms[i]
        s_nms[i], s_nms[0] = s_nms[0], s_nms[i]

        iou = box_iou(b_nms)

        # drop the kept box, then everything overlapping it too much
        b_nms = b_nms[1:]
        c_nms = c_nms[1:]
        s_nms = s_nms[1:]

        keep_mask = np.where(iou <= iou_threshold)[0]
        b_nms = b_nms[keep_mask]
        c_nms = c_nms[keep_mask]
        s_nms = s_nms[keep_mask]

    # reformat result for output
    return np.array(nboxes), np.array(nclasses), np.array(nscores)


def non_max_suppress(boxes, classes, scores, threshold=0.4):
    """Compact single-class hard NMS.

    *boxes* is (N, 4) in center-xywh.  Boxes are visited in descending
    score order; any box whose IoU with an already-kept box exceeds
    *threshold* is discarded.  Returns the kept (boxes, classes, scores).
    """
    # corner coordinates and areas, computed once up front
    half_w = boxes[:, 2] / 2
    half_h = boxes[:, 3] / 2
    x1 = boxes[:, 0] - half_w
    y1 = boxes[:, 1] - half_h
    x2 = boxes[:, 0] + half_w
    y2 = boxes[:, 1] + half_h
    areas = boxes[:, 2] * boxes[:, 3]

    order = np.argsort(scores)[::-1]  # indices, best score first
    keep = []  # indices of the boxes we retain

    # Keep the best remaining box, drop everything that overlaps it past
    # the threshold, and repeat until no candidates remain.
    while order.size > 0:
        best = order[0]
        keep.append(best)

        rest = order[1:]
        # intersection of the best box with every remaining candidate
        ix1 = np.maximum(x1[best], x1[rest])
        iy1 = np.maximum(y1[best], y1[rest])
        ix2 = np.minimum(x2[best], x2[rest])
        iy2 = np.minimum(y2[best], y2[rest])

        # +1 follows the inclusive pixel-length convention (x=3..5 spans
        # three pixels: 5-3+1), same as box_iou
        iw = np.maximum(0., ix2 - ix1 + 1)
        ih = np.maximum(0., iy2 - iy1 + 1)
        inter = iw * ih

        # IoU = overlap / (area1 + area2 - overlap)
        iou = inter / (areas[best] + areas[rest] - inter)

        # `survivors` indexes into `rest`, which starts at order[1:], so
        # shift by 1 to index back into `order`
        survivors = np.where(iou <= threshold)[0]
        order = order[survivors + 1]

    return boxes[keep], classes[keep], scores[keep]


def filter(pred_xywh, objectness, class_scores, img_shape, iou_threshold=0.4, confidence=0.1):
    """Compute final confidences, threshold them, scale the boxes to image
    coordinates, and run NMS.

    NOTE(review): this shadows the builtin ``filter``; renaming would
    change the public interface, so the name is kept.

    Returns (boxes, classes, scores); all three are empty arrays when no
    score reaches *confidence*.
    """
    # true score = objectness * class probability, shape (N, 1)
    box_scores = objectness * class_scores
    assert box_scores.shape[-1] == 1, "有不止一个类别, 该函数不可用, 仅对单类别使用"

    box_scores = np.squeeze(box_scores, axis=-1)
    # np.where returns a tuple of index arrays; the original tested
    # ``if not pos``, which is never true (the tuple is always non-empty),
    # so the no-detection branch was dead and it fell through with empty
    # arrays.  Test the index array itself and return empties explicitly.
    pos = np.where(box_scores >= confidence)
    if pos[0].size == 0:
        print("No person detected!!!")
        empty = np.zeros((0,))
        return np.zeros((0, 4)), empty, empty
    # gather surviving scores/boxes; single-class, so classes are all 0
    scores = box_scores[pos]
    boxes = pred_xywh[pos]
    classes = np.zeros(scores.shape, dtype=np.int8)
    # convert relative (0..1) coordinates to absolute image coordinates
    boxes[..., :2] *= img_shape
    boxes[..., 2:] *= img_shape

    nboxes, nclasses, nscores = non_max_suppress(boxes, classes, scores, threshold=iou_threshold)

    return nboxes, nclasses, nscores

def main():
    """Run the YOLO TFLite model on a single image and display detections."""
    # Five anchor boxes (w, h) in input-image pixels for the 160x160 net.
    anchor = [[13, 24], [33, 42], [36, 87], [94, 63], [68, 118]]
    logging.getLogger().setLevel(logging.INFO)
    model_path = './Model/yolo-s_relu.tflite'
    # Use an image file as the input.
    img_path = "./img1.png"
    img_raw = cv2.imread(str(img_path))
    frame = cv2.cvtColor(img_raw, cv2.COLOR_BGR2GRAY)
    small_frame = cv2.resize(frame, (160, 160), cv2.INTER_AREA) # resize to the network input size
    # Alternative input: a pre-dumped pixel array, used to compare results
    # against the same input data fed to the dev board.
    #frame = np.array(pic_data.list_pic)
    #small_frame = frame.reshape(160,160)

    # load model
    tflite_model = tf.lite.Interpreter(model_path)
    tflite_model.allocate_tensors()
    # Shape the input to (1, 160, 160, 1).
    small_frame = np.expand_dims(small_frame, 0)	# add batch dim (axis 0)
    small_frame = np.expand_dims(small_frame, 3)	# add channel dim (axis 3)
    tflife_input_data = np.float32(small_frame)		# convert to float32
    if(tflife_input_data.dtype == 'float32'): # pick one of the two normalization schemes
        #tflife_input_data = tflife_input_data/128-1
        tflife_input_data = tflife_input_data/256.0
    tflife_input_details = tflite_model.get_input_details()[0]
    tflife_output_details = tflite_model.get_output_details()[0]
    tflite_model.set_tensor(tflife_input_details['index'], tflife_input_data)
    # model run
    start_time = getTime_ms()
    tflite_model.invoke()
    # model output
    output_tflite = tflite_model.get_tensor(tflife_output_details['index'])[0] 
    output_tflite = np.expand_dims(output_tflite,axis=0)
    img_shape = img_raw.shape[:-1][::-1]  # (width, height) of the raw image
    # decode the raw YOLO head output into boxes/scores
    pred_xywh, objectness, class_scores = yolo_decode( output_tflite, anchor, num_classes=1,
                                                    input_dims=(160, 160), scale_x_y=0,
                                                    use_softmax=False)
    # NOTE(review): this keeps only the width, so filter() scales both x
    # and y coordinates by width — correct only for square images; confirm
    # this is intentional for non-square inputs.
    img_shape = img_shape[:-1]
    # confidence filtering + NMS
    boxes, classes, scores = filter(pred_xywh, objectness, class_scores, img_shape)
    end_time = getTime_ms()-start_time
    print("time=" , end_time)
    for box in boxes:
        draw_img(box, img_raw)
        
if __name__ == "__main__":
    main()