import os
import jpegHandler
import numpy as np
import cv2
from py_cpu_nms import yolo_output, xywh2xyxy, py_cpu_nms



def FSR_PostProcess_with_cv2(resultList, dstFilePath, fileName, index_to_class, kCategoryIndex=2):
    """Draw FSR detection boxes on the source image with OpenCV and save it.

    resultList[0] is reshaped to (32,) per-attribute box counts and
    resultList[1] to a (64, 304, 8) box tensor.  Box coordinates are scaled
    from the network input resolution (896x608) back to the original image
    size, drawn with cv2, and the annotated image is written to
    dstFilePath + basename(fileName).  Always returns None.
    """
    if resultList is None:
        print('graph inference failed ')
        return None

    tensor_num = np.reshape(resultList[0], 32)
    tensor_bbox = np.reshape(resultList[1], (64, 304, 8))
    img = cv2.imread(fileName)
    img_rows, img_cols, img_channel = img.shape
    # Scale factors from the network input resolution (896x608) to the image.
    scale_width = img_cols / float(896)
    scale_height = img_rows / float(608)

    bboxes = []
    for attr in range(32):
        # Attributes are spaced kCategoryIndex apart along the tensor's
        # first axis (32 attributes * 2 == 64 rows).
        class_idx = attr * kCategoryIndex
        for bbox_idx in range(int(tensor_num[attr])):
            row = tensor_bbox[class_idx][bbox_idx]
            lt_x = scale_width * row[0]
            lt_y = scale_height * row[1]
            rb_x = scale_width * row[2]
            rb_y = scale_height * row[3]
            score = row[4]
            bboxes.append([int(lt_x), int(lt_y), int(rb_x), int(rb_y), attr, score])

    print('bboxes', bboxes)
    if len(bboxes) == 0:
        return None

    for box in bboxes:
        cv2.rectangle(img, (box[0], box[1]), (box[2], box[3]), (0, 0, 255), 4)
        cv2.putText(img, "class:%s,score:%f" % (index_to_class[box[4]], box[5]), (box[0], box[1]),
                    cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 255, 255), 1)
    _, filename = os.path.split(fileName)
    cv2.imwrite("%s%s" % (dstFilePath, filename), img)
    return None


def FSR_PostProcess_with_plt(resultList, dstFilePath, fileName, index_to_class, kCategoryIndex=2):
    """Decode FSR detections and render them via jpegHandler (matplotlib path).

    resultList[0] is reshaped to (32,) per-attribute box counts and
    resultList[1] to a (64, 304, 8) box tensor.  Box coordinates are scaled
    from the network input resolution (896x608) to the original image size
    and handed to jpegHandler.drew_detection_on_img as
    [x, y, width, height, score, class] rows.  Returns None in all cases.

    Fix: the previous version also built a `bboxes` list of int-truncated
    corner boxes that was never used anywhere — dead code, removed.
    """
    if resultList is None:
        print('graph inference failed ')
        return None

    tensor_num = np.reshape(resultList[0], 32)
    tensor_bbox = np.reshape(resultList[1], (64, 304, 8))
    # The image is read only to obtain its dimensions for scaling.
    img = cv2.imread(fileName)
    img_rows, img_cols, img_channel = img.shape
    scale_width = img_cols / float(896)
    scale_height = img_rows / float(608)

    detections = []
    for attr in range(32):
        # Attributes are spaced kCategoryIndex apart along the first axis.
        class_idx = attr * kCategoryIndex
        for bbox_idx in range(int(tensor_num[attr])):
            row = tensor_bbox[class_idx][bbox_idx]
            lt_x = scale_width * row[0]
            lt_y = scale_height * row[1]
            rb_x = scale_width * row[2]
            rb_y = scale_height * row[3]
            score = row[4]
            # jpegHandler expects [x, y, width, height, score, class].
            detections.append(np.array([lt_x, lt_y, rb_x - lt_x, rb_y - lt_y, score, attr]))
    jpegHandler.drew_detection_on_img(np.array(detections), fileName, index_to_class, dstFilePath)



def SSD_PostProcess_with_plt(resultList, dstFilePath, fileName, index_to_class, confidence_thresh=0.45, kCategoryIndex=2, writer=None, img_idx=None):
    """Decode SSD output, optionally log VisDrone-format lines, and render.

    resultList[0] is reshaped to (200, 7) rows of
    [image_id, class, score, x1, y1, x2, y2] with normalized coordinates.
    Rows with score >= confidence_thresh are scaled to pixel coordinates,
    optionally appended to `writer` in VisDrone submission format (using
    img_idx + 1 as the frame id), and drawn via jpegHandler.
    Returns None in all cases.
    """
    if resultList is None:
        print('graph inference failed ')
        return None

    tensor_bbox = np.reshape(resultList[0], (200, 7))
    # The image is read only for its dimensions.
    img = cv2.imread(fileName)
    img_rows, img_cols, img_channel = img.shape

    # Keep rows whose confidence (column 2) clears the threshold.
    kept_rows = tensor_bbox[np.where(tensor_bbox[:, 2] >= confidence_thresh)]

    detections = []
    for row in kept_rows:
        lt_x = int(img_cols * row[3])
        lt_y = int(img_rows * row[4])
        rb_x = int(img_cols * row[5])
        rb_y = int(img_rows * row[6])
        score = row[2]
        cls = int(row[1])
        if writer is not None:
            visdrone_format_string = str(img_idx + 1) + ',' + '-1' + ','  + str(lt_x) + ',' + str(lt_y) + ','\
                                     + str(rb_x - lt_x) +',' + str(rb_y - lt_y) + ',' + '{:.3f}'.format(score) + ',' + str(cls) +',-1,-1\n'
            writer.write(visdrone_format_string)
        # jpegHandler expects [x, y, width, height, score, class].
        detections.append(np.array([lt_x, lt_y, rb_x - lt_x, rb_y - lt_y, score, cls]))
    jpegHandler.drew_detection_on_img(np.array(detections), fileName, index_to_class, dstFilePath)


def DOSD_PostProcess_with_plt(resultList, dstFilePath, fileName, index_to_class, kCategoryIndex=2):
    """Decode DOSD output and render detections via jpegHandler.

    resultList[0] is reshaped to (200, 7) rows of
    [image_id, class, score, x1, y1, x2, y2] with normalized coordinates.
    Rows with score >= 0.5 are scaled to pixel coordinates; the class id is
    shifted down by 1 (background offset) before drawing.
    Returns None in all cases.
    """
    confidence_thresh = 0.5
    if resultList is None:
        print('graph inference failed ')
        return None

    tensor_bbox = np.reshape(resultList[0], (200, 7))
    # The image is read only for its dimensions.
    img = cv2.imread(fileName)
    img_rows, img_cols, img_channel = img.shape

    # Keep rows whose confidence (column 2) clears the threshold.
    kept_rows = tensor_bbox[np.where(tensor_bbox[:, 2] >= confidence_thresh)]

    detections = []
    for row in kept_rows:
        lt_x = int(img_cols * row[3])
        lt_y = int(img_rows * row[4])
        rb_x = int(img_cols * row[5])
        rb_y = int(img_rows * row[6])
        score = row[2]
        cls = int(row[1]) - 1
        # jpegHandler expects [x, y, width, height, score, class].
        detections.append(np.array([lt_x, lt_y, rb_x - lt_x, rb_y - lt_y, score, cls]))
    jpegHandler.drew_detection_on_img(np.array(detections), fileName, index_to_class, dstFilePath)


def YOLO3_PostProcess_with_plt(resultList, dstFilePath, fileName, index_to_class, kCategoryIndex=2):
    """Decode the three YOLOv3 output scales, filter, NMS, and render.

    Each of the three outputs in resultList is decoded with yolo_output at
    its grid resolution (13/26/52), concatenated, thresholded on objectness,
    converted to corner coordinates, sorted by score, and per-class
    non-maximum-suppressed with py_cpu_nms.  Surviving boxes larger than
    10px in both dimensions are drawn via jpegHandler.
    Returns None when inference failed or nothing survives filtering.

    Fixes vs the previous version:
    - the 52x52 scale decoded resultList[0] again instead of resultList[2];
    - `if not image_pred.shape` could never trigger (a 2-D array's shape
      tuple is always truthy) — replaced with an explicit row-count check;
    - `image_pred[:, 5:].max(1)[0]` is a torch idiom; in numpy the `[0]`
      selects only the FIRST row's maximum and broadcasts that scalar over
      every box — replaced with a proper per-row `max(axis=1)`, which also
      removes the need for the bare `except:`;
    - scores were not re-ordered alongside the boxes after argsort, so the
      score column was misaligned with the sorted detections.
    """
    confidence_thresh = 0.5
    if resultList is None:
        print('graph inference failed ')
        return None

    # Anchors pre-scaled to each grid resolution.
    scaled_anchors0 = np.array([[3.6250, 2.8125],
                                [4.8750, 6.1875],
                                [11.6562, 10.1875]])  # 13 * 13 grid
    scaled_anchors1 = np.array([[1.8750, 3.8125],
                                [3.8750, 2.8125],
                                [3.6875, 7.4375]])    # 26 * 26 grid
    scaled_anchors2 = np.array([[1.2500, 1.6250],
                                [2.0000, 3.7500],
                                [4.1250, 2.8750]])    # 52 * 52 grid

    yolo_out1 = yolo_output(resultList[0], 13, scaled_anchors0).squeeze()
    yolo_out2 = yolo_output(resultList[1], 26, scaled_anchors1).squeeze()
    # NOTE(review): previously decoded resultList[0] here; the 52x52 scale
    # must use the third network output — confirm resultList has 3 entries.
    yolo_out3 = yolo_output(resultList[2], 52, scaled_anchors2).squeeze()

    image_pred = np.concatenate((yolo_out1, yolo_out2, yolo_out3), axis=0)
    # Keep boxes whose objectness (column 4) clears the threshold.
    image_pred = image_pred[np.where(image_pred[:, 4] > confidence_thresh)]
    # If none are remaining => process next image.
    if image_pred.shape[0] == 0:
        return None
    image_pred[:, : 4] = xywh2xyxy(image_pred[:, : 4])

    # Object confidence times best class confidence, per box.
    scores = image_pred[:, 4] * image_pred[:, 5:].max(axis=1)
    # Sort boxes and their scores together, highest score first.
    order = (-scores).argsort()
    image_pred = image_pred[order]
    scores = scores[order]
    class_preds = np.argmax(image_pred[:, 5:], axis=1)
    # detections rows: [x1, y1, x2, y2, score, class].
    detections = np.hstack(
        (image_pred[:, :4], scores.reshape((len(image_pred), 1)), class_preds.reshape((len(image_pred), 1))))

    final = []
    # Perform non-maximum suppression independently per class.
    for unique_label in np.unique(class_preds):
        idx = np.where(detections[:, -1] == unique_label)
        keep = py_cpu_nms(detections[idx][:, :-1])
        for n in keep:
            det = detections[idx[0][n]]
            # Discard degenerate boxes smaller than 10px in either dimension.
            if det[2] - det[0] > 10. and det[3] - det[1] > 10.:
                final.append(det)
    if len(final) == 0:
        return None
    final = np.array(final)

    detections = []
    for det in final:
        lt_x = int(det[0])
        lt_y = int(det[1])
        rb_x = int(det[2])
        rb_y = int(det[3])
        score = det[4]
        cls = int(det[-1] + 1)  # shift to the 1-based ids used by index_to_class
        # jpegHandler expects [x, y, width, height, score, class].
        detections.append(np.array([lt_x, lt_y, rb_x - lt_x, rb_y - lt_y, score, cls]))
    jpegHandler.drew_detection_on_img(np.array(detections), fileName, index_to_class, dstFilePath)