#!/usr/bin/env python 
# -*- coding: utf-8 -*-
# @Time    : 2018/10/23 11:16
# @Author  : Tang Yang
# @Site    : 
# @File    : basic_utils.py
import cv2
import numpy as np
import sys
from skimage import filters
import enum
from src.utils.image_retireval import image_retrieval
from src.utils.context import Context

# All SKU class labels the detector can emit: a brand/line prefix
# (Nestle, gerber, nccs, nco, ncon, ncor, nqmg, nric, youcui) plus a
# numeric id. Note the ids are sparse (some numbers are missing) and the
# list is kept in lexicographic order.
all_classes = ['Nestle1', 'Nestle10', 'Nestle100', 'Nestle101', 'Nestle102', 'Nestle103', 'Nestle104', 'Nestle105',
               'Nestle106', 'Nestle107', 'Nestle108', 'Nestle109', 'Nestle11', 'Nestle111', 'Nestle112', 'Nestle114',
               'Nestle115', 'Nestle116', 'Nestle117', 'Nestle118', 'Nestle119', 'Nestle12', 'Nestle120', 'Nestle121',
               'Nestle122', 'Nestle123', 'Nestle124', 'Nestle125', 'Nestle126', 'Nestle127', 'Nestle128', 'Nestle129',
               'Nestle13', 'Nestle130', 'Nestle131', 'Nestle132', 'Nestle133', 'Nestle134', 'Nestle136', 'Nestle137',
               'Nestle138', 'Nestle14', 'Nestle15', 'Nestle16', 'Nestle17', 'Nestle18', 'Nestle19', 'Nestle2',
               'Nestle20', 'Nestle22', 'Nestle23', 'Nestle25', 'Nestle26', 'Nestle27', 'Nestle28', 'Nestle29',
               'Nestle3', 'Nestle30', 'Nestle31', 'Nestle32', 'Nestle33', 'Nestle34', 'Nestle35', 'Nestle36',
               'Nestle37', 'Nestle38', 'Nestle39', 'Nestle4', 'Nestle40', 'Nestle41', 'Nestle42', 'Nestle43',
               'Nestle44', 'Nestle45', 'Nestle46', 'Nestle47', 'Nestle48', 'Nestle49', 'Nestle5', 'Nestle50',
               'Nestle51', 'Nestle52', 'Nestle53', 'Nestle54', 'Nestle55', 'Nestle56', 'Nestle57', 'Nestle58',
               'Nestle59', 'Nestle6', 'Nestle60', 'Nestle61', 'Nestle62', 'Nestle63', 'Nestle64', 'Nestle65',
               'Nestle66', 'Nestle67', 'Nestle68', 'Nestle69', 'Nestle7', 'Nestle70', 'Nestle71', 'Nestle72',
               'Nestle73', 'Nestle74', 'Nestle75', 'Nestle76', 'Nestle77', 'Nestle78', 'Nestle79', 'Nestle8',
               'Nestle80', 'Nestle81', 'Nestle82', 'Nestle83', 'Nestle84', 'Nestle85', 'Nestle86', 'Nestle87',
               'Nestle88', 'Nestle89', 'Nestle9', 'Nestle90', 'Nestle97', 'Nestle99', 'gerber1', 'gerber10', 'gerber11',
               'gerber12', 'gerber13', 'gerber14', 'gerber15', 'gerber16', 'gerber17', 'gerber18', 'gerber19',
               'gerber2', 'gerber20', 'gerber21', 'gerber22', 'gerber23', 'gerber24', 'gerber25', 'gerber26',
               'gerber27', 'gerber28', 'gerber29', 'gerber3', 'gerber30', 'gerber31', 'gerber32', 'gerber33',
               'gerber34', 'gerber36', 'gerber4', 'gerber5', 'gerber6', 'gerber7', 'gerber8', 'gerber9', 'nccs1',
               'nccs10', 'nccs11', 'nccs13', 'nccs14', 'nccs15', 'nccs17', 'nccs18', 'nccs19', 'nccs2', 'nccs20',
               'nccs21', 'nccs22', 'nccs23', 'nccs24', 'nccs28', 'nccs29', 'nccs3', 'nccs30', 'nccs31', 'nccs32',
               'nccs33', 'nccs34', 'nccs35', 'nccs36', 'nccs37', 'nccs39', 'nccs4', 'nccs5', 'nccs6', 'nccs7', 'nccs8',
               'nccs9', 'nco1', 'nco10', 'nco100', 'nco101', 'nco102', 'nco103', 'nco105', 'nco106', 'nco107', 'nco108',
               'nco109', 'nco11', 'nco110', 'nco111', 'nco113', 'nco114', 'nco117', 'nco12', 'nco13', 'nco14', 'nco15',
               'nco16', 'nco17', 'nco18', 'nco19', 'nco2', 'nco20', 'nco21', 'nco22', 'nco23', 'nco24', 'nco25',
               'nco26', 'nco27', 'nco28', 'nco29', 'nco3', 'nco30', 'nco31', 'nco32', 'nco33', 'nco34', 'nco35',
               'nco36', 'nco37', 'nco38', 'nco39', 'nco4', 'nco40', 'nco41', 'nco42', 'nco43', 'nco44', 'nco46',
               'nco48', 'nco49', 'nco5', 'nco50', 'nco51', 'nco53', 'nco54', 'nco55', 'nco56', 'nco57', 'nco58',
               'nco59', 'nco6', 'nco60', 'nco61', 'nco63', 'nco64', 'nco65', 'nco66', 'nco68', 'nco69', 'nco7', 'nco70',
               'nco72', 'nco73', 'nco75', 'nco76', 'nco77', 'nco78', 'nco8', 'nco80', 'nco81', 'nco82', 'nco83',
               'nco85', 'nco86', 'nco87', 'nco88', 'nco89', 'nco9', 'nco90', 'nco91', 'nco92', 'nco96', 'nco97',
               'nco99', 'ncon1', 'ncon10', 'ncon11', 'ncon12', 'ncon2', 'ncon3', 'ncon4', 'ncon5', 'ncon6', 'ncon7',
               'ncon8', 'ncon9', 'ncor1', 'ncor10', 'ncor11', 'ncor12', 'ncor13', 'ncor14', 'ncor15', 'ncor16',
               'ncor17', 'ncor18', 'ncor19', 'ncor3', 'ncor4', 'ncor5', 'ncor6', 'ncor7', 'ncor8', 'ncor9', 'nqmg1',
               'nqmg10', 'nqmg11', 'nqmg12', 'nqmg13', 'nqmg14', 'nqmg15', 'nqmg16', 'nqmg1box', 'nqmg2', 'nqmg2box',
               'nqmg3', 'nqmg3box', 'nqmg4', 'nqmg4box', 'nqmg5', 'nqmg5box', 'nqmg6', 'nqmg6box', 'nqmg7', 'nqmg8',
               'nqmg9', 'nric1', 'nric10', 'nric11', 'nric12', 'nric13', 'nric14', 'nric15', 'nric16', 'nric17',
               'nric18', 'nric19', 'nric2', 'nric20', 'nric21', 'nric23', 'nric3', 'nric4', 'nric5', 'nric6', 'nric7',
               'nric8', 'nric9', 'youcui1', 'youcui2', 'youcui3', 'youcui4', 'youcui6']


def get_area_of_polygon(points: list):
    """
    Polygon area via the shoelace (vector cross-product) formula.

    :param points: polygon vertices in order, e.g. [[x1, y1], [x2, y2], ...]
                   (the first vertex must NOT be repeated at the end)
    :return: absolute polygon area

    Bug fix: the original summed only consecutive edge terms and omitted the
    closing edge (last vertex back to the first), which gives wrong areas for
    any polygon not anchored at the origin (e.g. a translated rectangle).
    """
    assert (len(points) >= 3)
    area = 0
    n = len(points)

    for i in range(n):
        p1 = points[i]
        # Wrap around so the closing edge points[n-1] -> points[0] is included.
        p2 = points[(i + 1) % n]

        tri_area = (p1[0] * p2[1] - p2[0] * p1[1]) / 2
        area += tri_area
    return abs(area)


class DetectedBox:
    """A single detected object: bounding polygon, confidence score and label."""

    def __init__(self, position: list, score: float, label: str):
        """
        :param position: bounding box of the detected object, [[x1, y1], [x2, y2], ...]
        :param score: confidence of this box
        :param label: class label
        """
        self._position = position
        self._score = score
        self._label = label

    @property
    def position(self):
        return self._position

    @position.setter
    def position(self, value: list):
        self._position = value

    @property
    def score(self):
        return self._score

    @score.setter
    def score(self, value):
        self._score = value

    @property
    def label(self):
        return self._label

    @label.setter
    def label(self, value):
        self._label = value

    @property
    def area(self):
        # width * height; assumes corners ordered [l_up, r_up, r_down, l_down]
        width = self._position[1][0] - self._position[0][0]
        height = self._position[3][1] - self._position[0][1]
        return width * height

    def reset(self, position: list, score: float, label: str):
        """Overwrite all three attributes at once."""
        self._position, self._score, self._label = position, score, label

    def __str__(self):
        return "\t".join((str(self._position), str(self._score), str(self._label)))

    __repr__ = __str__


class DetectResult:
    """
    Detection result: the interface between detection and stitching.

    Holds an image plus its DetectedBox list. After ``sort_positions`` the
    box list becomes a list of rows (list of lists of DetectedBox) and
    ``_is_sorted`` switches how ``positions``/``scores``/``labels`` flatten.
    """

    def __init__(self, img: np.array, detected_boxes: list):
        """
        :param img: the image
        :param detected_boxes: all detected boxes, [detected_box1, detected_box_2, ...]
        """
        self._img = img
        self._detected_boxes = detected_boxes
        # Becomes True once sort_positions() has grouped boxes into rows.
        self._is_sorted = False

    def zoom(self, multiple):
        """
        Rescale an oversized image, and all box coordinates, in place.

        :param multiple: scale factor; > 1 enlarges, < 1 shrinks
        :return: None
        """
        # NOTE(review): cv2.resize's third POSITIONAL argument is `dst`, not
        # the interpolation mode — this likely should be
        # interpolation=cv2.INTER_LINEAR; confirm against the cv2 signature.
        self._img = cv2.resize(self._img, (int(self._img.shape[1] * multiple),
                                           int(self._img.shape[0] * multiple)),
                               cv2.INTER_LINEAR)  # rescale the image
        for idx, dbox in enumerate(self._detected_boxes):
            for i in range(4):
                self._detected_boxes[idx].position[i][0] = int(dbox.position[i][0] * multiple)
                self._detected_boxes[idx].position[i][1] = int(dbox.position[i][1] * multiple)
        # all boxes rescaled along with the image

    def sort_positions(self):
        """
        Group all detected boxes into rows (shelf layers) and sort each row.

        Boxes are sorted by bottom-left y; a jump larger than ``max_height``
        starts a new row, and each row is then sorted by bottom-left x.

        :return: the row-grouped box list (also stored on the instance)
        """
        # Heights of all detected products
        heights = []
        for dbox in self._detected_boxes:
            heights.append(dbox.position[3][1] - dbox.position[0][1])
        # NOTE(review): despite the name this is the MINIMUM height (`/ 1` is
        # a no-op); the mean-height variant is left commented out. Confirm
        # which row-break threshold is intended.
        max_height = min(heights) / 1  # sum(heights) / len(heights)

        # Sort by bottom-left y coordinate
        self._detected_boxes = sorted(self._detected_boxes, key=lambda x: x.position[3][1])
        ret = []
        start_idx = 0
        for idx, dbox in enumerate(self._detected_boxes):
            # A bottom-left-y jump larger than max_height starts a new row
            if idx == 0:
                continue
            if dbox.position[3][1] - self._detected_boxes[idx - 1].position[-1][1] > max_height:
                # Sort the finished row by bottom-left x coordinate
                ret.append(sorted(self._detected_boxes[start_idx:idx], key=lambda x: x.position[3][0]))
                start_idx = idx
        ret.append(sorted(self._detected_boxes[start_idx:], key=lambda x: x.position[3][0]))
        # Update attributes
        self._detected_boxes = ret
        self._is_sorted = True
        return self._detected_boxes

    def insert_box(self, r_and_c: tuple, box: DetectedBox):
        """
        Insert a box at the given row/column; the row is re-sorted afterwards
        so column order is preserved.

        :param r_and_c: (row, column) tuple
        :param box: box to insert
        :return: None
        """
        assert self._is_sorted
        assert r_and_c[0] < len(self._detected_boxes)
        self._detected_boxes[r_and_c[0]].insert(r_and_c[1], box)
        self._detected_boxes[r_and_c[0]] = sorted(self._detected_boxes[r_and_c[0]], key=lambda x: x.position[-1][0])

    @property
    def img(self):
        # Underlying image array.
        return self._img

    @property
    def positions(self):
        # Flat list of box polygons (rows flattened once sorted).
        if not self._is_sorted:
            return [i.position for i in self._detected_boxes]
        else:
            return [box.position for row_box in self._detected_boxes for box in row_box]

    @property
    def scores(self):
        # Flat list of box confidences (rows flattened once sorted).
        if not self._is_sorted:
            return [i.score for i in self._detected_boxes]
        else:
            return [box.score for row_box in self._detected_boxes for box in row_box]

    @property
    def labels(self):
        # Flat list of box labels (rows flattened once sorted).
        if not self._is_sorted:
            return [i.label for i in self._detected_boxes]
        else:
            return [box.label for row_box in self._detected_boxes for box in row_box]

    @property
    def row_col_info(self):
        # 1-based [row, col] per box, same order as `positions`;
        # empty until sort_positions() has been called.
        if not self._is_sorted:
            return []
        ret = []
        for idx, box in enumerate(self._detected_boxes):
            ret += [[idx + 1, col + 1] for col in range(len(box))]
        return ret

    @property
    def detected_boxes(self):
        return self._detected_boxes

    @detected_boxes.setter
    def detected_boxes(self, boxes):
        self._detected_boxes = boxes

    def show(self):
        """
        Draw every box on a copy of the image, color-coded by label
        (green = detected, red = "miss", yellow = "unknown", blue = "added"),
        and return the copy converted RGB -> BGR.

        Note: zipping with row_col_info means nothing is drawn unless
        sort_positions() has run (row_col_info is [] before that).
        """
        tmp_img = self._img.copy()
        for label, r_and_c, position in zip(self.labels, self.row_col_info, self.positions):
            pos = (int(position[0][0]), int(position[0][1]))
            color = (0, 255, 0)
            if label == "miss":
                color = (0, 0, 255)
            elif label == "unknown":
                color = (0, 255, 255)
            elif label == "added":
                color = (255, 0, 0)
            cv2.polylines(tmp_img, np.array([position], dtype=np.int32),
                          1, color, thickness=2)
        tmp_img = cv2.cvtColor(tmp_img, cv2.COLOR_RGB2BGR)
        return tmp_img


class SkuSpecies:
    """
    One SKU category appearing in an image: its boxes, scores and derived
    stats (per-box areas, image coverage ratio, occupied rows, crop records).
    """

    def __init__(self, img_shape, name, desc, positions, scores, label, row_col_info):
        self._img_shape = img_shape
        self._positions = positions
        self._name = name
        self._desc = desc
        self._scores = scores
        self._labels = label
        self._row_col_info = row_col_info
        self._areas = self._compute_areas()
        # Fraction of the whole image covered by this SKU's boxes.
        self._ratio = sum(self._areas) / (img_shape[0] * img_shape[1])
        # Distinct shelf rows this SKU occupies.
        self._list_rows = list({rc[0] for rc in row_col_info})
        self._crops = self._get_crops()

    def _compute_areas(self):
        # Polygon area of every box, in position order.
        return [get_area_of_polygon(pos) for pos in self._positions]

    def _get_crops(self):
        # One crop record per box; assumes corners [l_up, r_up, r_down, l_down].
        crops = []
        for idx, pos in enumerate(self._positions):
            rc = self._row_col_info[idx]
            crops.append(dict(image_ptr=idx,
                              rows=rc[0],
                              cols=rc[1],
                              x=int(pos[0][0]), y=int(pos[0][1]),
                              height=int(pos[2][1] - pos[0][1]), width=int(pos[2][0] - pos[0][0]),
                              score=float(self._scores[idx]), roi_area=int(self._areas[idx]), region=1))
        return crops

    @property
    def goods_name(self):
        return self._name

    @property
    def goods_desc(self):
        return self._desc

    @property
    def ratio(self):
        return self._ratio

    @property
    def num(self):
        # Number of boxes for this SKU.
        return len(self._scores)

    @property
    def is_show(self):
        # Constant in this implementation.
        return False

    @property
    def list_rows(self):
        return self._list_rows

    @property
    def produce(self):
        # Constant in this implementation.
        return 0

    @property
    def crops(self):
        return self._crops


class GuessType(enum.Enum):
    """Guess for what occupies an undetected shelf region."""
    stockout = 0      # out of stock
    right_type = 1    # same product as the neighbor on the right
    left_type = 2     # same product as the neighbor on the left
    unknown_type = 3  # cannot tell


def get_feature(mat_1, mat_2):
    """
    Count "good" SIFT matches between two images using Lowe's ratio test.

    :param mat_1: first image
    :param mat_2: second image
    :return: number of matches passing the 0.7 ratio test; 0 when either
             image yields no / too few descriptors
    """
    sift = cv2.xfeatures2d.SIFT_create()
    kp1, des1 = sift.detectAndCompute(mat_1, None)
    kp2, des2 = sift.detectAndCompute(mat_2, None)
    # Guard BEFORE building the matcher (the original constructed the FLANN
    # matcher even when it could never be used): knnMatch with k=2 needs at
    # least two descriptors on each side.
    if des1 is None or des2 is None:
        return 0
    if len(des1) < 2 or len(des2) < 2:
        return 0
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)
    # Lowe's ratio test: keep matches clearly better than their runner-up.
    good = [m for m, n in matches if m.distance < 0.7 * n.distance]
    return len(good)


def sd_pro(img):
    """
    Vertical projection profile: the sum of pixel values in each column.

    :param img: 2-D image array (e.g. a binarized edge map)
    :return: list of per-column sums, length == img.shape[1]
    """
    # Vectorized column sum replaces the original O(rows * cols) Python
    # double loop; numpy promotes small integer dtypes for the accumulation,
    # so uint8 inputs cannot overflow.
    return list(np.asarray(img).sum(axis=0))


def grad_dec(mat_1):
    """
    Count low-gradient columns in the Otsu-binarized Sobel edge map.

    A column counts when its vertical projection is below 255 * rows / 8,
    i.e. it contains few edge pixels.

    :param mat_1: BGR image
    :return: number of low-gradient columns
    """
    mat_1 = cv2.cvtColor(mat_1, cv2.COLOR_BGR2GRAY)
    x = cv2.Sobel(mat_1, cv2.CV_16S, 1, 0)
    y = cv2.Sobel(mat_1, cv2.CV_16S, 0, 1)
    absX = cv2.convertScaleAbs(x)
    absY = cv2.convertScaleAbs(y)
    # Combine |dx| and |dy| into a single gradient magnitude image.
    dst = cv2.addWeighted(absX, 0.5, absY, 0.5, 0)
    thresh_img = filters.threshold_otsu(dst)
    ret, dst = cv2.threshold(dst, thresh_img, 255, cv2.THRESH_BINARY)
    pro = sd_pro(dst)
    # Count columns below the "few edges" threshold.
    # (The original's unused local `a` has been removed.)
    return sum(1 for col_sum in pro if col_sum < 255 * mat_1.shape[0] / 8)


def _gradient_guess(mat_1):
    """Fallback guess from the gradient profile: a mostly-flat region is a stock-out."""
    if grad_dec(mat_1) > mat_1.shape[1] / 2:
        return GuessType.stockout, 1
    return GuessType.unknown_type, 1


def stockout_dec(mat_0, mat_1, mat_2):
    """
    Stock-out / misplaced-product detection for an undetected shelf region.

    :param mat_0: bounding-box image on the left (may be None)
    :param mat_1: the unknown region
    :param mat_2: bounding-box image on the right (may be None)
    :return: (GuessType, count) — the guessed content and the estimated
             number of product units

    Refactor: the identical gradient-based fallback appeared three times in
    the original; it is now a single helper (_gradient_guess).
    """
    if mat_0 is None:
        # Only a right neighbor exists: a strong SIFT match means the region
        # holds the same product as the right box.
        if get_feature(mat_1, mat_2) > 10:
            return GuessType.right_type, 1
        return _gradient_guess(mat_1)
    if mat_2 is None:
        if get_feature(mat_1, mat_0) > 10:
            return GuessType.left_type, 1
        return _gradient_guess(mat_1)
    n_1 = get_feature(mat_0, mat_1)
    n_2 = get_feature(mat_1, mat_2)
    if n_1 > 10 or n_2 > 10:
        if n_1 > n_2:
            # Estimate the unit count from the width ratio to the left box,
            # clamped to at least 1 (matches the original n < 1 branch).
            n = mat_1.shape[1] / mat_0.shape[1]
            return GuessType.left_type, max(1, int(n))
        n = mat_1.shape[1] / mat_2.shape[1]
        return GuessType.right_type, max(1, int(n))
    return _gradient_guess(mat_1)


def mat_inter(box1, box2):
    """
    Whether two axis-aligned rectangles intersect (touching edges count).

    :param box1: (xA, yA, xB, yB)
    :param box2: (xA, yA, xB, yB)
    :return: True if the rectangles overlap or touch
    """
    x01, y01, x02, y02 = box1
    x11, y11, x12, y12 = box2

    # Center distance along each axis vs. half the sum of the side lengths:
    # the rectangles intersect iff both axes overlap.
    lx = abs((x01 + x02) / 2 - (x11 + x12) / 2)
    ly = abs((y01 + y02) / 2 - (y11 + y12) / 2)
    sax = abs(x01 - x02)
    sbx = abs(x11 - x12)
    say = abs(y01 - y02)
    sby = abs(y11 - y12)
    # Return the comparison directly (the original had the
    # `if cond: return True else: return False` anti-idiom).
    return lx <= (sax + sbx) / 2 and ly <= (say + sby) / 2


def solve_coincide(box1, box2):
    """
    Overlap ratio of two rectangles: intersection area divided by the area
    of the smaller rectangle.

    :param box1: (xA, yA, xB, yB)
    :param box2: (xA, yA, xB, yB)
    :return: the ratio, or False when the rectangles do not intersect
    """
    # Guard clause instead of wrapping the whole computation in an if.
    if not mat_inter(box1, box2):
        return False
    x01, y01, x02, y02 = box1
    x11, y11, x12, y12 = box2
    overlap_w = min(x02, x12) - max(x01, x11)
    overlap_h = min(y02, y12) - max(y01, y11)
    intersection = overlap_w * overlap_h
    smaller_area = min((x02 - x01) * (y02 - y01),
                       (x12 - x11) * (y12 - y11))
    return intersection / smaller_area


def unique(db, db_list):
    """
    Insert DetectedBox ``db`` into ``db_list`` while de-duplicating boxes
    whose axis-aligned overlap ratio exceeds 0.1.

    NOTE(review): when the new box overlaps an existing one, the code deletes
    the existing box if ``db.score <= db_box.score`` (i.e. the HIGHER-scoring
    box is removed and the lower-scoring new one kept), and drops the new box
    only when its score is strictly greater. That looks inverted for
    NMS-style dedup — confirm this is intended.

    :param db: DetectedBox to insert
    :param db_list: list of DetectedBox; mutated in place
    :return: the (mutated) db_list
    """
    if len(db_list) == 0:
        db_list.append(db)
    else:
        flag = True
        need_del_indexes = []
        for idx, db_box in enumerate(db_list):
            # Compare the two boxes as (x1, y1, x2, y2) rectangles built from
            # the top-left and bottom-right polygon corners.
            if solve_coincide((db.position[0][0], db.position[0][1], db.position[2][0], db.position[2][1]),
                              (db_box.position[0][0], db_box.position[0][1], db_box.position[2][0],
                               db_box.position[2][1])) > 0.1:
                if db.score <= db_box.score:
                    need_del_indexes.append(idx)
                else:
                    flag = False
                    break
        # Delete from the back so earlier indexes stay valid.
        need_del_indexes.sort(reverse=True)
        for idx in need_del_indexes:
            del db_list[idx]
        if flag:
            db_list.append(db)

    return db_list


def is_miss(src):
    """
    Heuristic test for an empty (missing product) shelf crop.

    A crop is judged "missing" when it has very few Canny edge pixels, or
    when the per-pixel cross-channel variance map is itself nearly uniform
    (flat color region).

    :param src: 3-channel image crop
    :return: True if the region looks empty
    """
    gray = cv2.cvtColor(src, cv2.COLOR_RGB2GRAY)
    th = filters.threshold_otsu(gray)
    edge = cv2.Canny(gray, th / 3, th)
    # Vectorized edge-pixel count (was an O(H*W) Python double loop).
    cnt = int(np.count_nonzero(edge > 125))
    if cnt < edge.shape[0] * edge.shape[1] / 10:
        return True
    # Per-pixel variance across the three channels, computed in float64 just
    # like np.var on a per-pixel [r, g, b] list (was another double loop),
    # then the variance of that map.
    mat = np.var(src.astype(np.float64), axis=2)
    var = np.var(mat)
    # bool() keeps the original plain-bool return type (np.var yields numpy
    # scalars).
    return bool(var < 10000)


def img_generator(img, bbox, scaling_ratio=3, method="ALL"):
    """
    Crop context strips around a bounding box.

    :param img: source image (H x W [x C])
    :param bbox: [[x, y], ...] corners ordered [l_up, r_up, r_down, l_down]
    :param scaling_ratio: how many box widths/heights of context to keep
    :param method: "UP_BOTTOM", "LEFT_RIGHT" or "ALL" (above+below, left+right,
                   or the whole image)
    :return: "UP_BOTTOM" -> [upper_img, bottom_img], x_start, x_end
             "LEFT_RIGHT" -> [left_img, right_img], y_start, y_end
             "ALL" -> the whole original image
             (start/end are the strip's span in original-image coordinates)
    """
    if method == "UP_BOTTOM":
        box_width = bbox[1][0] - bbox[0][0]
        pad = box_width * (scaling_ratio - 1) / 2
        # Clamp the horizontal span to the image with max/min.
        x_start = max(int(bbox[0][0] - pad), 0)
        x_end = min(int(bbox[1][0] + pad), img.shape[1])
        strips = [img[0:bbox[0][1], x_start:x_end],
                  img[bbox[3][1]:, x_start:x_end]]
        return strips, x_start, x_end
    if method == "LEFT_RIGHT":
        box_height = bbox[3][1] - bbox[0][1]
        pad = box_height * (scaling_ratio - 1) / 2
        y_start = max(int(bbox[0][1] - pad), 0)
        y_end = min(int(bbox[3][1] + pad), img.shape[0])
        strips = [img[y_start:y_end, 0:bbox[0][0]],
                  img[y_start:y_end, bbox[1][0]:]]
        return strips, y_start, y_end
    if method == "ALL":
        return img


def template(img, method, threshold=None):
    """
    Template matching: slide the template over the target image and collect
    every position whose similarity exceeds ``threshold``.

    :param img: pair (template_image, target_image)
    :param method: an OpenCV matchTemplate mode, e.g. cv2.TM_CCOEFF_NORMED
    :param threshold: similarity cut-off. NOTE(review): the default ``None``
        would make ``result > threshold`` raise on Python 3 — callers must
        always pass a numeric threshold.
    :return: list of candidate boxes [x1, y1, x2, y2, 100 * similarity],
             or [] when the target is smaller than the template
    """
    # Template image
    tpl = img[0]
    # Target image
    target = img[1]

    # Template height and width
    th, tw = tpl.shape[:2]

    # matchTemplate requires the target to be at least as large as the template.
    if target.shape[0] < tpl.shape[0] or target.shape[1] < tpl.shape[1]:
        return []
    result = cv2.matchTemplate(target, tpl, method)  # similarity of the template at every position
    box_list = []  # candidate boxes
    loc = np.where(result > threshold)  # returns (rows, cols) => (Y, X)
    for tl in zip(loc[1], loc[0]):  # iterate top-left corners as (X, Y)
        br = (tl[0] + tw, tl[1] + th)

        # Score is scaled by 100 so it survives later int conversion.
        box = [tl[0], tl[1], br[0], br[1], 100 * result[tl[1], tl[0]]]
        box_list.append(box)
    return box_list


def py_nms(dets, thresh):
    """
    Pure Python non-maximum suppression (NMS) baseline.

    :param dets: N x 5 array, rows [x1, y1, x2, y2, score]
    :param thresh: IoU threshold above which a box is suppressed
    :return: list of indices of the kept boxes

    Bug fix: the original sorted scores with ``argsort()[::1]`` (ascending),
    so the LOWEST-scoring box of each overlapping cluster was kept. NMS must
    process boxes in descending score order (``[::-1]``), as in the canonical
    py_cpu_nms implementation.
    """
    x1 = dets[:, 0]
    y1 = dets[:, 1]
    x2 = dets[:, 2]
    y2 = dets[:, 3]
    scores = dets[:, 4]

    # Area of every candidate box (inclusive pixel coordinates).
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)

    # Indices in DESCENDING score order.
    order = scores.argsort()[::-1]

    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        # Intersection of the current best box with all remaining boxes
        # (numpy broadcasting yields vectors).
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])

        # Clamp to 0: non-overlapping boxes give negative widths/heights.
        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h

        # IoU = overlap / (area1 + area2 - overlap)
        ovr = inter / (areas[i] + areas[order[1:]] - inter)

        # Keep only boxes whose overlap with the current box is low enough.
        inds = np.where(ovr <= thresh)[0]

        # inds indexes order[1:], so shift by one to index order itself.
        order = order[inds + 1]
    return keep


def generator_box(small_im, large_im, thresh_1=0.75, thresh2=0.2, methods=cv2.TM_CCOEFF_NORMED):
    """
    Template-match ``small_im`` inside ``large_im`` and return NMS-filtered
    candidates as DetectedBox objects (empty label, score = 100 * similarity).

    :param small_im: template image
    :param large_im: search image
    :param thresh_1: similarity threshold for candidate generation
    :param thresh2: IoU threshold for non-maximum suppression
    :param methods: OpenCV template-matching mode
    :return: list of DetectedBox (possibly empty)
    """
    box_list = []
    # Raw candidates from template matching, as an integer array.
    dets = np.array(template((small_im, large_im), threshold=thresh_1, method=methods), dtype=np.int64)
    if dets.shape[0] == 0:
        return box_list

    # Prune overlapping candidates with NMS and keep the survivors.
    f_dets = dets[py_nms(dets, thresh2)]

    for x1, y1, x2, y2, score in f_dets:
        # Rectangle corners ordered [l_up, r_up, r_down, l_down].
        corners = [[x1, y1], [x2, y1], [x2, y2], [x1, y2]]
        box_list.append(DetectedBox(corners, score, ""))

    return box_list


def template_match(all_imgs, small_img):
    """
    Run generator_box with ``small_img`` as template against every image.

    :param all_imgs: iterable of search images
    :param small_img: template image
    :return: one list of DetectedBox per input image, in order
    """
    return [generator_box(small_img, target) for target in all_imgs]


def complement_square_2(detect_result: DetectResult):
    """
    Template-matching based box completion: for every confidently detected
    box, template-match its crop against the surrounding left/right/upper/
    lower context strips, NMS all finds, and append them labeled "added".

    Side effect: every detected box's coordinates are snapped to integers.

    :param detect_result: detection result to augment
    :return: a new DetectResult over the same image including the added boxes
    """
    img = detect_result.img

    # Binary mask of image area already covered by detected boxes (1 = covered).
    mask_img = np.zeros((img.shape[0], img.shape[1]))
    detect_boxes = detect_result.detected_boxes

    for idx, detectBox in enumerate(detect_boxes):  # iterate every detected box
        position = detectBox.position
        l_up = [int(i) for i in position[0]]
        # NOTE(review): `// 1` after int() is a no-op on these values.
        r_down = [int(i) // 1 for i in position[2]]
        r_up = [int(i) // 1 for i in position[1]]
        l_down = [int(i) // 1 for i in position[3]]

        detectBox.position = [l_up, r_up, r_down, l_down]
        mask_img[l_up[1] // 1: l_down[1] // 1, l_up[0]:r_up[0]] = 1

    add_list = []
    for detect_box in detect_boxes:
        # Only confidently detected boxes serve as templates.
        if detect_box.score < 0.9:
            continue
        imgs, y_start, y_end = img_generator(img, detect_box.position, method="LEFT_RIGHT")
        left_img, right_img = imgs
        imgs, x_start, x_end = img_generator(img, detect_box.position, method="UP_BOTTOM")
        up_img, bottom_img = imgs
        small_img = img[detect_box.position[0][1]:detect_box.position[3][1],
                    detect_box.position[0][0]:detect_box.position[1][0]]
        # Top-left corner of each of the four strips in original-image coordinates
        left_up_corners = [[0, y_start], [detect_box.position[0][0], y_start],
                           [x_start, 0], [x_start, detect_box.position[3][1]]]
        all_added = template_match([left_img, right_img, up_img, bottom_img], small_img)
        print("alladded:", all_added)
        for i in range(len(all_added)):
            added_boxes = all_added[i]
            left_up_corner = left_up_corners[i]
            for box in added_boxes:
                # Translate the strip-local box back into image coordinates.
                l_up = [box.position[0][0] + left_up_corner[0], box.position[0][1] + left_up_corner[1]]
                r_up = [box.position[1][0] + left_up_corner[0], box.position[1][1] + left_up_corner[1]]
                r_down = [box.position[2][0] + left_up_corner[0], box.position[2][1] + left_up_corner[1]]
                l_down = [box.position[3][0] + left_up_corner[0], box.position[3][1] + left_up_corner[1]]
                # Fraction of the candidate already covered by detections.
                s = np.sum(mask_img[l_up[1] // 1: l_down[1] // 1, l_up[0]:r_up[0]]) / box.area
                if s > 0.2:
                    continue
                add_list.append(DetectedBox([l_up, r_up, r_down, l_down], box.score, "added"))

    # NMS over all added candidates (scores were scaled by 100 upstream).
    dest = []
    for box in add_list:
        print(box)
        dest.append([box.position[0][0], box.position[0][1],
                     box.position[2][0], box.position[2][1], box.score])
    all_img_box_list = []
    if len(dest) != 0:
        dest = np.array(dest)
        print(dest.shape)
        print(dest)
        keep_dets = py_nms(dest, 0.2)
        f_dets = dest[keep_dets]
        print(f_dets.shape)
        print("f_dets: ", f_dets)
        for i in range(len(f_dets)):
            # box_list for a single image; score rescaled back from the
            # 100x representation
            all_img_box_list.append(DetectedBox([[f_dets[i][0], f_dets[i][1]],
                                                 [f_dets[i][2], f_dets[i][1]],
                                                 [f_dets[i][2], f_dets[i][3]],
                                                 [f_dets[i][0], f_dets[i][3]]],
                                                f_dets[i][4] / 100, add_list[keep_dets[i]].label))
    print("lalalala: ", len(all_img_box_list))
    print(all_img_box_list)
    detect_boxes += all_img_box_list

    return DetectResult(img, detect_boxes)


def complement_square(detectResult: DetectResult, context: Context):
    """
    Similarity-based box completion: walk left and right from every detected
    box in steps of one box width, score each gap crop against the box crop
    with a VGG-feature retrieval model (image_retrieval), and add the best
    vertical-stride candidate per gap. Low-similarity candidates get the
    label "miss" (looks empty) or "unknown" instead of "added".

    Side effect: every detected box's coordinates are snapped to integers.

    :param detectResult: detection result to augment
    :param context: carries the input shape, VGG model and layer name, and a
        joblib-loaded object passed to image_retrieval (presumably a fitted
        PCA transform — confirm against Context/image_retrieval)
    :return: a new DetectResult over the same image including the added boxes
    """
    img = detectResult.img

    # Binary mask of area already covered by detected boxes (1 = covered).
    new_img = np.zeros((img.shape[0], img.shape[1]))
    detectBoxes = detectResult._detected_boxes

    xmin = sys.float_info.max
    xmax = 0
    ymin = sys.float_info.max
    ymax = 0

    shape = context.shape
    layer = context.layer
    model = context.vgg_model
    something = context.joblib

    # (Verbatim note below: compute the shelf-wide min/max x and y so gaps at
    # the shelf edges can be handled later.)
    """
    求得整个货架的最大和最小x、y的值， 为了后期可以处理边缘缺省的情况
     """
    for idx, detectBox in enumerate(detectBoxes):  # iterate every shelf layer
        position = detectBox.position
        l_up = [int(i) for i in position[0]]
        # NOTE(review): `// 1` after int() is a no-op on these values.
        r_down = [int(i) // 1 for i in position[2]]
        r_up = [int(i) // 1 for i in position[1]]
        l_down = [int(i) // 1 for i in position[3]]

        detectBox.position = [l_up, r_up, r_down, l_down]

        new_img[l_up[1] // 1: l_down[1] // 1, l_up[0]:r_up[0]] = 1
        x_min = min(l_up[0], l_down[0])
        x_max = max(r_up[0], r_down[0])
        y_min = min(l_up[1], l_down[1])
        y_max = max(r_up[1], r_down[1])

        if x_min < xmin:
            xmin = x_min
        if x_max > xmax:
            xmax = x_max
        if y_min < ymin:
            ymin = y_min
        if y_max > ymax:
            ymax = y_max

    db_list = []
    # Vertical jitter (in pixels) tried for each candidate position.
    init_strides = [-15, -8, 0, 8, 15]
    for count, detectBox in enumerate(detectBoxes):  # iterate every box
        position = detectBox.position
        l_up = position[0]
        r_down = position[2]
        r_up = position[1]
        l_down = position[3]

        box_height = l_down[1] - l_up[1]
        box_width = r_up[0] - l_up[0]

        # Walk from this box toward the LEFT edge of the shelf
        n = 1
        final_strides = None
        while l_up[0] - n * box_width > xmin:
            # Fraction of the candidate cell already covered by detections;
            # stop walking once we hit an occupied cell.
            yuzhi = np.sum(new_img[l_up[1]:l_down[1], l_up[0] - n * box_width:l_up[0] - (n - 1) * box_width]) / (
                    (l_down[1] - l_up[1]) * box_width)
            if yuzhi > 0.3:
                break
            dboxs = []
            all_similars = []
            # Once the best stride has been found for this box, reuse it.
            strides = init_strides if final_strides is None else [final_strides]
            for stride in strides:
                if l_up[1] + stride < 0 or l_down[1] + stride > img.shape[0]:
                    # Out of the image: placeholder with zero similarity.
                    dboxs.append(DetectedBox([], 0, detectBox.label))
                    all_similars.append(0)
                    continue
                detect_box_l_up = (l_up[0] - n * box_width, l_up[1] + stride if l_up[1] + stride > 0 else 0)
                detect_box_r_up = (l_up[0] - (n - 1) * box_width, l_up[1] + stride)
                detect_box_l_down = (l_up[0] - n * box_width, l_down[1] + stride)
                detect_box_r_down = (l_up[0] - (n - 1) * box_width, l_down[1] + stride)
                # Unused locals kept from the original (debug crops).
                first = img[detect_box_l_up[1]:detect_box_l_down[1], detect_box_l_up[0]:detect_box_r_up[0]]
                second = img[l_up[1]:l_down[1], l_up[0]:r_up[0]]
                # Similarity between the candidate crop and the source box crop.
                similar = image_retrieval(
                    [img[detect_box_l_up[1]:detect_box_l_down[1], detect_box_l_up[0]:detect_box_r_up[0]],
                     img[l_up[1]:l_down[1], l_up[0]:r_up[0]]], model, shape, layer, something)[0][0]
                db = DetectedBox([detect_box_l_up, detect_box_r_up, detect_box_r_down, detect_box_l_down],
                                 similar, "added")
                # Empty-looking crops are forced out of the running.
                if is_miss(img[db.position[0][1]:db.position[3][1], db.position[0][0]:db.position[1][0]]):
                    db.score = 0
                dboxs.append(db)
                all_similars.append(similar)
            max_similars = max(all_similars)
            max_index = all_similars.index(max_similars)
            if final_strides is None:
                final_strides = strides[max_index]
            db = dboxs[max_index]
            if max_similars < 0.85:
                # Low similarity: decide between an empty slot and an unknown product.
                if is_miss(img[db.position[0][1]:db.position[3][1], db.position[0][0]:db.position[1][0]]):
                    db.label = 'miss'
                else:
                    db.label = 'unknown'

            db_list = unique(db, db_list)
            n += 1

        # Walk from this box toward the RIGHT edge of the shelf
        n = 1
        final_strides = None
        while r_up[0] + n * box_width < xmax:
            yuzhi = np.sum(new_img[l_up[1]:l_down[1], l_up[0] + n * box_width:l_up[0] + (n + 1) * box_width]) / (
                    (l_down[1] - l_up[1]) * box_width)
            if yuzhi > 0.3:
                break
            dboxs = []
            all_similars = []
            strides = init_strides if final_strides is None else [final_strides]
            for stride in strides:
                if l_up[1] + stride < 0 or l_down[1] + stride > img.shape[0]:
                    dboxs.append(DetectedBox([], 0, detectBox.label))
                    all_similars.append(0)
                    continue
                detect_box_l_up = (l_up[0] + n * box_width, l_up[1] + stride)
                detect_box_r_up = (l_up[0] + (n + 1) * box_width, l_up[1] + stride)
                detect_box_l_down = (l_up[0] + n * box_width, l_down[1] + stride)
                detect_box_r_down = (l_up[0] + (n + 1) * box_width, l_down[1] + stride)
                # Unused locals kept from the original (debug crops).
                first = img[detect_box_l_up[1]:detect_box_l_down[1], detect_box_l_up[0]:detect_box_r_up[0]]
                second = img[l_up[1]:l_down[1], l_up[0]:r_up[0]]
                similar = image_retrieval(
                    [img[detect_box_l_up[1]:detect_box_l_down[1], detect_box_l_up[0]:detect_box_r_up[0]],
                     img[l_up[1]:l_down[1], l_up[0]:r_up[0]]], model, shape, layer, something)[0][0]
                db = DetectedBox([detect_box_l_up, detect_box_r_up, detect_box_r_down, detect_box_l_down],
                                 similar, "added")
                if is_miss(img[db.position[0][1]:db.position[3][1], db.position[0][0]:db.position[1][0]]):
                    db.score = 0
                dboxs.append(db)
                all_similars.append(similar)
            max_similars = max(all_similars)
            max_index = all_similars.index(max_similars)
            if final_strides is None:
                # NOTE(review): the left walk uses strides[max_index] here.
                # Equivalent at this point (strides == init_strides when
                # final_strides is None) but inconsistent — consider unifying.
                final_strides = init_strides[max_index]
            db = dboxs[max_index]
            if max_similars < 0.85:
                if is_miss(img[db.position[0][1]:db.position[3][1], db.position[0][0]:db.position[1][0]]):
                    db.label = 'miss'
                else:
                    db.label = 'unknown'

            db_list = unique(db, db_list)
            n += 1

    detectBoxes += db_list

    return DetectResult(img, detectBoxes)


def complement_square_3(detectResult: DetectResult):
    """Fill in grid cells missing from *detectResult* by sliding copies of
    each detected box left and right along its shelf row.

    For every already-detected box, same-sized candidate boxes are stepped
    outward one box-width at a time (trying several vertical stride offsets
    on the first step) until either the shelf edge (xmin/xmax) is reached or
    the candidate area already overlaps existing detections.  Each candidate
    crop is compared against the seed crop via ``image_retrieval``; the
    candidate with the smallest score per step is kept.  Kept candidates
    whose best score exceeds 1.1 are re-labelled ``'miss'`` (empty slot,
    per ``is_miss``) or ``'unknown'``.

    NOTE(review): ``image_retrieval`` appears to return a distance here
    (lower == more similar): the best candidate is chosen with ``min`` and
    out-of-range strides use the sentinel 999999 — confirm against its
    implementation.

    Args:
        detectResult: DetectResult holding the shelf image and its detected
            boxes.  Mutated: box positions are snapped to ints and the
            complemented boxes are appended to its internal list.

    Returns:
        A new DetectResult wrapping the same image and the augmented box list.
    """
    img = detectResult.img

    # Occupancy mask: 1 wherever an already-detected box covers the pixel.
    new_img = np.zeros((img.shape[0], img.shape[1]))
    detectBoxes = detectResult._detected_boxes
    print(detectBoxes)

    xmin = sys.float_info.max
    xmax = 0
    ymin = sys.float_info.max  # NOTE: ymin/ymax are computed but never read below
    ymax = 0

    # VGG19 intermediate-layer features feed image_retrieval's crop comparison.
    shape = (128, 128, 3)
    layer = 'block4_conv4'
    base_model = VGG19(weights='imagenet', include_top=False, input_shape=shape)
    model = Model(inputs=base_model.input, outputs=base_model.get_layer(layer).output)
    # HACK: hard-coded absolute Windows path to the fitted PCA parameters.
    something = joblib.load(r'D:\workspace\Shelf-product-identification\PCA_parameter.pkl')

    # (translation of the note below) Compute the min/max x and y over the
    # whole shelf so that boxes missing at the shelf edges can be handled
    # later.
    """
    求得整个货架的最大和最小x、y的值， 为了后期可以处理边缘缺省的情况
     """
    for idx, detectBox in enumerate(detectBoxes):  # each detected box on the shelf
        position = detectBox.position
        # Snap the four corners (order: l_up, r_up, r_down, l_down) to int
        # pixel coordinates; the `// 1` is a no-op on the already-int values.
        l_up = [int(i) for i in position[0]]
        r_down = [int(i) // 1 for i in position[2]]
        r_up = [int(i) // 1 for i in position[1]]
        l_down = [int(i) // 1 for i in position[3]]

        detectBox.position = [l_up, r_up, r_down, l_down]

        # Mark this box's area as occupied in the mask.
        new_img[l_up[1] // 1: l_down[1] // 1, l_up[0]:r_up[0]] = 1
        x_min = min(l_up[0], l_down[0])
        x_max = max(r_up[0], r_down[0])
        y_min = min(l_up[1], l_down[1])
        y_max = max(r_up[1], r_down[1])

        if x_min < xmin:
            xmin = x_min
        if x_max > xmax:
            xmax = x_max
        if y_min < ymin:
            ymin = y_min
        if y_max > ymax:
            ymax = y_max

    # cv2.imshow("new_img", new_img)
    db_list = []  # complemented boxes accumulated across all seeds
    init_strides = [-15, -8, 0, 8, 15]  # vertical offsets (px) tried on the first step
    for count, detectBox in enumerate(detectBoxes):  # use each box as a sweep seed
        print("--------------------------------------------------")
        print(str(count + 1) + " / " + str(len(detectBoxes)))
        print("--------------------------------------------------")
        position = detectBox.position
        l_up = position[0]
        r_down = position[2]
        r_up = position[1]
        l_down = position[3]

        box_height = l_down[1] - l_up[1]  # NOTE: unused below
        box_width = r_up[0] - l_up[0]

        # Sweep from this box toward the LEFT, one box-width per step.
        n = 1
        final_strides = None  # locked to the winning stride after the first step
        while l_up[0] - n * box_width > xmin:
            # Fraction of the candidate area already covered by detections
            # ("yuzhi" = pinyin for "threshold value").
            yuzhi = np.sum(new_img[l_up[1]:l_down[1], l_up[0] - n * box_width:l_up[0] - (n - 1) * box_width]) / (
                    (l_down[1] - l_up[1]) * box_width)
            if yuzhi > 0.3:
                break  # ran into existing detections — stop this direction
            dboxs = []
            all_similars = []
            strides = init_strides if final_strides is None else [final_strides]
            for stride in strides:
                if l_up[1] + stride < 0 or l_down[1] + stride > img.shape[0]:
                    # Candidate leaves the image vertically: push a sentinel
                    # score so this stride can never win the min() below.
                    dboxs.append(DetectedBox([], 999999, detectBox.label))
                    all_similars.append(999999)
                    continue
                # Candidate box corners, shifted left by n widths and down/up
                # by `stride` (the y-clamp here is redundant after the guard
                # above, which already rejects l_up[1] + stride < 0).
                detect_box_l_up = (l_up[0] - n * box_width, l_up[1] + stride if l_up[1] + stride > 0 else 0)
                detect_box_r_up = (l_up[0] - (n - 1) * box_width, l_up[1] + stride)
                detect_box_l_down = (l_up[0] - n * box_width, l_down[1] + stride)
                detect_box_r_down = (l_up[0] - (n - 1) * box_width, l_down[1] + stride)
                # `first`/`second` are only used by the commented-out debug
                # display below.
                first = img[detect_box_l_up[1]:detect_box_l_down[1], detect_box_l_up[0]:detect_box_r_up[0]]
                second = img[l_up[1]:l_down[1], l_up[0]:r_up[0]]
                # cv2.imshow("first", first)
                # cv2.imshow("second", second)
                # cv2.waitKey()
                # Score between the candidate crop and the seed crop.
                similar = image_retrieval(
                    [img[detect_box_l_up[1]:detect_box_l_down[1], detect_box_l_up[0]:detect_box_r_up[0]],
                     img[l_up[1]:l_down[1], l_up[0]:r_up[0]]], model, shape, layer, something)[0][0]
                # print(similar)
                db = DetectedBox([detect_box_l_up, detect_box_r_up, detect_box_r_down, detect_box_l_down],
                                 similar,  # "added")
                                 detectBox.label)
                # if is_miss(img[db.position[0][1]:db.position[3][1], db.position[0][0]:db.position[1][0]]):
                #     db.score = 999999
                dboxs.append(db)
                all_similars.append(similar)
            min_similars = min(all_similars)
            min_index = all_similars.index(min_similars)
            if final_strides is None:
                final_strides = strides[min_index]  # lock in the winning stride
            db = dboxs[min_index]
            if min_similars > 1.1:
                # Too dissimilar to the seed: either an empty slot or an
                # unrecognized product.
                print(db.position)
                if is_miss(img[db.position[0][1]:db.position[3][1], db.position[0][0]:db.position[1][0]]):
                    db.label = 'miss'
                else:
                    db.label = 'unknown'

            db_list = unique(db, db_list)  # deduplicate against earlier candidates
            n += 1

        # Sweep from this box toward the RIGHT, one box-width per step
        # (mirror of the left sweep above).
        n = 1
        final_strides = None
        while r_up[0] + n * box_width < xmax:
            yuzhi = np.sum(new_img[l_up[1]:l_down[1], l_up[0] + n * box_width:l_up[0] + (n + 1) * box_width]) / (
                    (l_down[1] - l_up[1]) * box_width)
            if yuzhi > 0.3:
                break
            dboxs = []
            all_similars = []
            strides = init_strides if final_strides is None else [final_strides]
            for stride in strides:
                if l_up[1] + stride < 0 or l_down[1] + stride > img.shape[0]:
                    dboxs.append(DetectedBox([], 999999, detectBox.label))
                    all_similars.append(999999)
                    continue
                detect_box_l_up = (l_up[0] + n * box_width, l_up[1] + stride)
                detect_box_r_up = (l_up[0] + (n + 1) * box_width, l_up[1] + stride)
                detect_box_l_down = (l_up[0] + n * box_width, l_down[1] + stride)
                detect_box_r_down = (l_up[0] + (n + 1) * box_width, l_down[1] + stride)
                first = img[detect_box_l_up[1]:detect_box_l_down[1], detect_box_l_up[0]:detect_box_r_up[0]]
                second = img[l_up[1]:l_down[1], l_up[0]:r_up[0]]
                # cv2.imshow("first", first)
                # cv2.imshow("second", second)
                # cv2.waitKey()
                similar = image_retrieval(
                    [img[detect_box_l_up[1]:detect_box_l_down[1], detect_box_l_up[0]:detect_box_r_up[0]],
                     img[l_up[1]:l_down[1], l_up[0]:r_up[0]]], model, shape, layer, something)[0][0]
                # print(similar)
                db = DetectedBox([detect_box_l_up, detect_box_r_up, detect_box_r_down, detect_box_l_down],
                                 similar,  # "added")
                                 detectBox.label)
                # if is_miss(img[db.position[0][1]:db.position[3][1], db.position[0][0]:db.position[1][0]]):
                #     db.score = 999999
                dboxs.append(db)
                all_similars.append(similar)
            min_similars = min(all_similars)
            min_index = all_similars.index(min_similars)
            if final_strides is None:
                # NOTE(review): indexes init_strides where the left sweep
                # indexes `strides`; equivalent here because `strides` is
                # init_strides whenever final_strides is None.
                final_strides = init_strides[min_index]
            db = dboxs[min_index]
            if min_similars > 1.1:
                if is_miss(img[db.position[0][1]:db.position[3][1], db.position[0][0]:db.position[1][0]]):
                    db.label = 'miss'
                else:
                    db.label = 'unknown'

            db_list = unique(db, db_list)
            n += 1

    detectBoxes += db_list  # mutates detectResult's internal box list in place

    return DetectResult(img, detectBoxes)


def desnet_classify(model, img):
    """Classify a product crop with a DenseNet-style Keras model.

    Args:
        model: a Keras model whose ``predict`` output is a (1, n_classes)
            score array whose columns are aligned with ``all_classes``.
        img: RGB image crop, shape (H, W, 3) — resized to the model's
            224x224 input here.

    Returns:
        tuple: ``(score, label)`` — the maximum class score and the
        corresponding class name from ``all_classes``.
    """
    # NOTE(review): removed leftover debug code (cv2.cvtColor into an unused
    # tmp_img plus cv2.imshow/cv2.waitKey) that opened a window and blocked
    # the whole pipeline waiting for a key press on every call.
    img = cv2.resize(img, (224, 224))        # match the model's input size
    img = (img / 255).astype(np.float32)     # normalize to [0, 1], keras dtype
    batch = img.reshape((1, 224, 224, 3))    # add batch dimension
    scores = model.predict(batch)
    max_index = np.argmax(scores)
    print("------")
    print(all_classes[max_index], np.max(scores))
    return np.max(scores), all_classes[max_index]


# def complement_square_with_classifier(detectResult: DetectResult):
#     desNet = models.load_model(r'D:\workspace\model_weight.h5')
#     img = detectResult.img
#
#     new_img = np.zeros((img.shape[0], img.shape[1]))
#     detectBoxes = detectResult._detected_boxes
#     print(detectBoxes)
#
#     xmin = sys.float_info.max
#     xmax = 0
#     ymin = sys.float_info.max
#     ymax = 0
#
#     shape = (128, 128, 3)
#     layer = 'block4_conv4'
#     base_model = VGG19(weights='imagenet', include_top=False, input_shape=shape)
#     model = Model(inputs=base_model.input, outputs=base_model.get_layer(layer).output)
#     something = joblib.load(r'D:\workspace\Shelf-product-identification\PCA_parameter.pkl')
#
#     """
#     求得整个货架的最大和最小x、y的值， 为了后期可以处理边缘缺省的情况
#      """
#     for idx, detectBox in enumerate(detectBoxes):  # 遍历货架的每一层
#         position = detectBox.position
#         l_up = [int(i) for i in position[0]]
#         r_down = [int(i) // 1 for i in position[2]]
#         r_up = [int(i) // 1 for i in position[1]]
#         l_down = [int(i) // 1 for i in position[3]]
#
#         detectBox.position = [l_up, r_up, r_down, l_down]
#
#         new_img[l_up[1] // 1: l_down[1] // 1, l_up[0]:r_up[0]] = 1
#         x_min = min(l_up[0], l_down[0])
#         x_max = max(r_up[0], r_down[0])
#         y_min = min(l_up[1], l_down[1])
#         y_max = max(r_up[1], r_down[1])
#
#         if x_min < xmin:
#             xmin = x_min
#         if x_max > xmax:
#             xmax = x_max
#         if y_min < ymin:
#             ymin = y_min
#         if y_max > ymax:
#             ymax = y_max
#
#     # cv2.imshow("new_img", new_img)
#     db_list = []
#     init_strides = [-15, -8, 0, 8, 15]
#     for count, detectBox in enumerate(detectBoxes):  # 遍历货架的每一个
#         print("--------------------------------------------------")
#         print(str(count + 1) + " / " + str(len(detectBoxes)))
#         print("--------------------------------------------------")
#         position = detectBox.position
#         l_up = position[0]
#         r_down = position[2]
#         r_up = position[1]
#         l_down = position[3]
#
#         box_height = l_down[1] - l_up[1]
#         box_width = r_up[0] - l_up[0]
#
#         # 从中间往左边
#         n = 1
#         final_strides = None
#         while l_up[0] - n * box_width > xmin:
#             yuzhi = np.sum(new_img[l_up[1]:l_down[1], l_up[0] - n * box_width:l_up[0] - (n - 1) * box_width]) / (
#                     (l_down[1] - l_up[1]) * box_width)
#             if yuzhi > 0.3:
#                 break
#             dboxs = []
#             all_similars = []
#             strides = init_strides if final_strides is None else [final_strides]
#             for stride in strides:
#                 if l_up[1] + stride < 0 or l_down[1] + stride > img.shape[0]:
#                     dboxs.append(DetectedBox([], 0, detectBox.label))
#                     all_similars.append(0)
#                     continue
#                 detect_box_l_up = (l_up[0] - n * box_width, l_up[1] + stride if l_up[1] + stride > 0 else 0)
#                 detect_box_r_up = (l_up[0] - (n - 1) * box_width, l_up[1] + stride)
#                 detect_box_l_down = (l_up[0] - n * box_width, l_down[1] + stride)
#                 detect_box_r_down = (l_up[0] - (n - 1) * box_width, l_down[1] + stride)
#                 first = img[detect_box_l_up[1]:detect_box_l_down[1], detect_box_l_up[0]:detect_box_r_up[0]]
#                 second = img[l_up[1]:l_down[1], l_up[0]:r_up[0]]
#                 similar, label = desnet_classify(desNet, first)
#
#                 # cv2.imshow("first", first)
#                 # cv2.imshow("second", second)
#                 # cv2.waitKey()
#                 # similar = image_retrieval(
#                 #     [img[detect_box_l_up[1]:detect_box_l_down[1], detect_box_l_up[0]:detect_box_r_up[0]],
#                 #      img[l_up[1]:l_down[1], l_up[0]:r_up[0]]], model, shape, layer, something)[0][0]
#                 # print(similar)
#                 db = DetectedBox([detect_box_l_up, detect_box_r_up, detect_box_r_down, detect_box_l_down],
#                                  similar,  "added")
#                                  # detectBox.label)
#                 if is_miss(img[db.position[0][1]:db.position[3][1], db.position[0][0]:db.position[1][0]]):
#                     db.score = 0
#                 dboxs.append(db)
#                 all_similars.append(similar)
#             max_similars = max(all_similars)
#             max_index = all_similars.index(max_similars)
#             if final_strides is None:
#                 final_strides = strides[max_index]
#             db = dboxs[max_index]
#             if max_similars < 0.3:
#                 print(db.position)
#                 if is_miss(img[db.position[0][1]:db.position[3][1], db.position[0][0]:db.position[1][0]]):
#                     db.label = 'miss'
#                 else:
#                     db.label = 'unknown'
#
#             db_list = unique(db, db_list)
#             n += 1
#
#         # 从中间往右边
#         n = 1
#         final_strides = None
#         while r_up[0] + n * box_width < xmax:
#             yuzhi = np.sum(new_img[l_up[1]:l_down[1], l_up[0] + n * box_width:l_up[0] + (n + 1) * box_width]) / (
#                     (l_down[1] - l_up[1]) * box_width)
#             if yuzhi > 0.3:
#                 break
#             dboxs = []
#             all_similars = []
#             strides = init_strides if final_strides is None else [final_strides]
#             for stride in strides:
#                 if l_up[1] + stride < 0 or l_down[1] + stride > img.shape[0]:
#                     dboxs.append(DetectedBox([], 0, detectBox.label))
#                     all_similars.append(0)
#                     continue
#                 detect_box_l_up = (l_up[0] + n * box_width, l_up[1] + stride)
#                 detect_box_r_up = (l_up[0] + (n + 1) * box_width, l_up[1] + stride)
#                 detect_box_l_down = (l_up[0] + n * box_width, l_down[1] + stride)
#                 detect_box_r_down = (l_up[0] + (n + 1) * box_width, l_down[1] + stride)
#                 first = img[detect_box_l_up[1]:detect_box_l_down[1], detect_box_l_up[0]:detect_box_r_up[0]]
#                 second = img[l_up[1]:l_down[1], l_up[0]:r_up[0]]
#                 # cv2.imshow("first", first)
#                 # cv2.imshow("second", second)
#                 # cv2.waitKey()
#                 similar, label = desnet_classify(desNet, first)
#                 # similar = image_retrieval(
#                 #     [img[detect_box_l_up[1]:detect_box_l_down[1], detect_box_l_up[0]:detect_box_r_up[0]],
#                 #      img[l_up[1]:l_down[1], l_up[0]:r_up[0]]], model, shape, layer, something)[0][0]
#                 # print(similar)
#                 db = DetectedBox([detect_box_l_up, detect_box_r_up, detect_box_r_down, detect_box_l_down],
#                                  similar,  "added")
#                                  # detectBox.label)
#                 if is_miss(img[db.position[0][1]:db.position[3][1], db.position[0][0]:db.position[1][0]]):
#                     db.score = 0
#                 dboxs.append(db)
#                 all_similars.append(similar)
#             max_similars = max(all_similars)
#             max_index = all_similars.index(max_similars)
#             if final_strides is None:
#                 final_strides = init_strides[max_index]
#             db = dboxs[max_index]
#             if max_similars < 0.3:
#                 if is_miss(img[db.position[0][1]:db.position[3][1], db.position[0][0]:db.position[1][0]]):
#                     db.label = 'miss'
#                 else:
#                     db.label = 'unknown'
#
#             db_list = unique(db, db_list)
#             n += 1
#
#     detectBoxes += db_list
#
#     return DetectResult(img, detectBoxes)
