# from volumeMeasure import VolumeMeasure
import cv2
import numpy as np
from keras.models import load_model
from loss_functions import cross_entropy_balanced, pixel_error
from configparser import ConfigParser
from PyQt5.QtWidgets import QApplication

import operator
import random
from utils import convex_area, nms
import sys
from SpecialRegion import SpecialRegion
import time
from collections import deque
import os
from utils import judge_sorted_list_bsimilar

# Silence TensorFlow's C++ logging before Keras initialises it (3 = errors only).
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

class parallax:
    """Record of one matched left/right x-coordinate pair and its disparity."""

    def __init__(self):
        # x position in the left image, x position in the right image,
        # and their difference (the disparity value).
        self.leftX = 0
        self.rightX = 0
        self.paraValue = 0

def ascendPara(a, b):
    """cmp-style comparator: order two parallax records by ``paraValue``.

    Returns -1, 0 or 1 for ascending sorts (e.g. via functools.cmp_to_key).
    """
    av, bv = a.paraValue, b.paraValue
    # Standard three-way comparison idiom: True/False subtract to -1/0/1.
    return (av > bv) - (av < bv)

def get_f_r(pos, arr):
    """Return indices ``(l, r)`` of the nearest positive entries of *arr*,
    scanning left and right from *pos* (both scans include *pos* itself).

    A side that finds no positive value falls back to *pos*.  NOTE: to match
    the original bounds exactly, indices 0, 1 and the last index are never
    inspected by the scans.
    """
    l = r = pos
    # Leftward scan: pos, pos-1, ..., 2.
    idx = pos
    while idx >= 2:
        if arr[idx] > 0:
            l = idx
            break
        idx -= 1
    # Rightward scan: pos, pos+1, ..., len(arr)-2.
    idx = pos
    while idx <= len(arr) - 2:
        if arr[idx] > 0:
            r = idx
            break
        idx += 1
    return l, r

def surf_stereoMatch(left, right, bVisualized=False):
    """Sparse stereo matching via SURF keypoints + FLANN kd-tree matching.

    Accepts a match when it passes Lowe's 0.8 ratio test, the left/right
    keypoint rows agree within 10%, and the left x lies right of the right x
    (positive disparity).

    Parameters
    ----------
    left, right : images of identical size (left drives the output grids).
    bVisualized : when truthy, additionally return a drawMatchesKnn image.

    Returns
    -------
    (disparity, right_matched, vis_or_None, matched_all_left_points)
        disparity[y][x]           : integer disparity at matched left pixels, -1 elsewhere.
        right_matched[y][x]       : (x_r, y_r) of the matched right keypoint, -1 elsewhere.
        matched_all_left_points[y]: matched left x coordinates on row y.
    """
    img_shape = left.shape[:2]
    minhessian = 400
    matched_all_left_points = [[] for _ in range(left.shape[0])]
    detector = cv2.xfeatures2d.SURF_create(minhessian)
    kpl, desl = detector.detectAndCompute(left, None)
    kpr, desr = detector.detectAndCompute(right, None)
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(desl, desr, k=2)
    matchesMask = [[0, 0] for _ in range(len(matches))]
    disparity = [[-1 for _ in range(img_shape[1])] for _ in range(img_shape[0])]
    right_matched = [[-1 for _ in range(img_shape[1])] for _ in range(img_shape[0])]
    for i, pair in enumerate(matches):
        # BUG FIX: knnMatch may return fewer than k=2 neighbours for some
        # descriptors; the original `(m, n)` unpack then raised ValueError.
        if len(pair) < 2:
            continue
        m, n = pair
        lx, ly = kpl[m.queryIdx].pt
        rx, ry = kpr[m.trainIdx].pt
        # BUG FIX: guard the row-ratio test against a keypoint at y == 0,
        # which made the original divide by zero.
        if ry == 0:
            continue
        if m.distance < 0.8 * n.distance and abs(ly / ry - 1) < 0.1 and lx > rx:
            matchesMask[i] = [1, 0]
            right_matched[int(ly)][int(lx)] = (int(rx), int(ry))
            disparity[int(ly)][int(lx)] = int(lx - rx)
            matched_all_left_points[int(ly)].append(int(lx))
    if bVisualized:
        draw_params = dict(matchColor=(0, 255, 0), singlePointColor=(255, 0, 0),
                           matchesMask=matchesMask, flags=0)
        img3 = cv2.drawMatchesKnn(left, kpl, right, kpr, matches, None, **draw_params)
        return disparity, right_matched, img3, matched_all_left_points
    return disparity, right_matched, None, matched_all_left_points

def surf_stereoMatch1(left, right):
    """SURF + FLANN matching that keeps matches whose disparity lies within
    0.5% of the median disparity, then displays the good matches in a window
    (blocks on cv2.waitKey()).

    Returns the list of retained ``parallax`` records (may be empty).
    """
    minhessian = 100
    detector = cv2.xfeatures2d.SURF_create(minhessian)
    kpl, desl = detector.detectAndCompute(left, None)
    kpr, desr = detector.detectAndCompute(right, None)
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.match(desl, desr)
    # Keep matches whose descriptor distance is below twice the best distance.
    mindist = min((m.distance for m in matches), default=2 * left.shape[1])
    goodMatches = [m for m in matches if m.distance < 2 * mindist]
    paras = []
    for g_match in goodMatches:
        # BUG FIX: create a fresh record per match.  The original instantiated
        # one `parallax()` outside the loop and appended it repeatedly, so
        # every list entry aliased the same object (the last match's values).
        temp = parallax()
        temp.leftX = kpl[g_match.queryIdx].pt[0]
        temp.rightX = kpr[g_match.trainIdx].pt[0]
        temp.paraValue = temp.leftX - temp.rightX
        paras.append(temp)
    paras.sort(key=lambda p: p.paraValue)
    errorRange = 0.005  # keep disparities within 0.5% of the median disparity
    if paras:
        paraMedian = paras[len(paras) // 2].paraValue
        # BUG FIX: the original removed elements from `paras` while iterating
        # it, which skips entries; also guard against a zero median.
        if paraMedian != 0:
            paras = [p for p in paras if abs(p.paraValue / paraMedian - 1) <= errorRange]
    draw_params = dict(matchColor=(0, 255, 0), singlePointColor=(255, 0, 0), flags=0)
    show_img = cv2.drawMatches(left, kpl, right, kpr, goodMatches, None, **draw_params)
    cv2.imshow('matched', show_img)
    cv2.waitKey()
    return paras

def someProcessDetailed(cv_image, cfg, bGray=0, bGaussianBlur=0, bCLAHE=0, bSharpen=0, bLaplacian=0, bOTSU=0, bRCF=0, bCanny=0):
    """Apply a configurable chain of preprocessing stages to *cv_image*.

    Each ``b*`` flag enables one stage; stages run in parameter order:
    grayscale conversion, Gaussian blur, CLAHE equalisation, sharpening,
    Laplacian filtering, per-channel Otsu thresholding, RCF deep edge
    detection (Keras model loaded from 'model/rcfmodel'), and Canny edge
    detection.  *cfg* is a ConfigParser whose 'process' section supplies
    kernel sizes and thresholds.  Returns the processed image; note the
    channel count may change (e.g. Canny always yields a single channel).
    """
    # self.cfg.read('SWQT.ini')
    # if cv_image.shape[2] == 4:
    #     cv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGRA2BGR)
    if len(cv_image.shape) > 2 and bGray:
        cv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
    if bGaussianBlur:
        gaussianBlurKSize = cfg.getint('process', 'gaussianBlurKSize')
        cv_image = cv2.GaussianBlur(cv_image, (gaussianBlurKSize, gaussianBlurKSize), 1, 1)
    if bCLAHE:
        clahe = cv2.createCLAHE(clipLimit=cfg.getfloat('process', 'CLAHECoeff'))
        if len(cv_image.shape) == 2:
            cv_image = clahe.apply(cv_image)
        else:
            # Colour input: equalise only the lightness (L) channel in Lab space.
            clahe_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2Lab)
            splited = cv2.split(clahe_image)
            dst = clahe.apply(splited[0])
            np.copyto(splited[0], dst)
            cv_image = cv2.merge(splited)
            cv_image = cv2.cvtColor(cv_image, cv2.COLOR_Lab2BGR)
    if bSharpen:
        # Identity-plus-Laplacian kernel: sharpens by boosting the centre pixel.
        laplacian = np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]])
        cv_image = cv2.filter2D(cv_image, -1, laplacian)
    if bLaplacian:
        # Pure Laplacian kernel: keeps only the edge response.
        laplacian = np.array([[0, -1, 0], [-1, 4, -1], [0, -1, 0]])
        cv_image = cv2.filter2D(cv_image, -1, laplacian)
    if bOTSU:
        if len(cv_image.shape) == 2:
            _, cv_image = cv2.threshold(cv_image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        else:
            # Colour input: each channel gets its own Otsu threshold level.
            splited = cv2.split(cv_image)
            for i in range(3):
                _, splited[i] = cv2.threshold(splited[i], 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
            cv_image = cv2.merge(splited)
    if bRCF:
        old_shape = cv_image.shape
        if len(old_shape) == 2:
            cv_image = cv2.cvtColor(cv_image, cv2.COLOR_GRAY2BGR)
        # NOTE(review): the model is re-loaded from disk on every call —
        # consider caching it if this function runs in a loop.
        model = load_model('model/rcfmodel', custom_objects=
                   {'cross_entropy_balanced': cross_entropy_balanced, 'pixel_error': pixel_error})
        # The RCF network expects a fixed 480x320 input; result is resized back
        # to the original dimensions below.
        cv_image = cv2.resize(cv_image, (480, 320))
        y_pred = model.predict(cv_image.reshape((-1, 320, 480, 3)))[-1]
        y_pred = np.around(y_pred*255).astype(np.uint8).reshape((320, 480, 1))
        if len(old_shape) == 3:
            y_pred = cv2.cvtColor(y_pred, cv2.COLOR_GRAY2BGR)
        cv_image = cv2.resize(y_pred, (old_shape[1], old_shape[0]))
    if bCanny:
        cannyLowThresh = cfg.getint('process', 'cannyLowThresh')
        cannyHighThresh = cfg.getint('process', 'cannyHighThresh')
        # if len(cv_image.shape) == 2:
        #     cv_image = cv2.Canny(cv_image, cannyLowThresh, cannyHighThresh)
        # else:
        #     splited = cv2.split(cv_image)
        #     for i in range(3):
        #         splited[i] = cv2.Canny(splited[i], cannyLowThresh, cannyHighThresh)
        #     cv_image = cv2.merge(splited)
        if len(cv_image.shape) == 3:
            cv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
        cv_image = cv2.Canny(cv_image, cannyLowThresh, cannyHighThresh)
    return cv_image

def compute_disparity(left, right):
    """Compute left and right SGBM disparity maps plus a WLS-filtered left map.

    Parameters
    ----------
    left, right : rectified stereo pair (SGBM is configured for one channel).

    Returns
    -------
    (displ, dispr, filtered_displ) : raw fixed-point SGBM output.
        NOTE: values are NOT divided by 16 — callers must apply
        ``.astype(np.float32) / 16.0`` to obtain true disparities.
    """
    win_size = 11
    channels = 1
    matcher_left = cv2.StereoSGBM_create(0, 16*8, win_size,
                                         4*channels*win_size*win_size,
                                         32*channels*win_size*win_size,
                                         1, 63, 10, 100, 32,
                                         cv2.STEREO_SGBM_MODE_SGBM)
    displ = matcher_left.compute(left, right)
    matcher_right = cv2.ximgproc.createRightMatcher(matcher_left)
    # BUG FIX: the right matcher must be fed (right, left) — the original
    # passed (left, right), which invalidates the WLS filter's left/right
    # consistency check.
    dispr = matcher_right.compute(right, left)
    # WLS filter smooths the left disparity using the right map for
    # confidence; renamed from `filter` to avoid shadowing the builtin.
    wls_filter = cv2.ximgproc.createDisparityWLSFilter(matcher_left)
    wls_filter.setLambda(3000)
    wls_filter.setSigmaColor(10)
    filtered_displ = wls_filter.filter(displ, left, disparity_map_right=dispr)
    return displ, dispr, filtered_displ

#referenced from https://github.com/SPengLiang/SGBM_OpenCV/blob/master/src/main.py
def SPengLiang_sgbm(left, right):
    """SGBM disparity with simple hole-filling, displayed on screen (blocks on
    cv2.waitKey()).

    Returns the post-processed, normalised disparity map (float32, roughly
    in [0, 1]).
    """
    window_size = 9
    min_disp = 0
    num_disp = 112-min_disp
    stereo = cv2.StereoSGBM_create(minDisparity=min_disp,
                                   numDisparities=num_disp,
                                   blockSize=8,
                                   P1=8*3*window_size**2,
                                   P2=32*3*window_size**2,
                                   disp12MaxDiff=1,
                                   uniquenessRatio=10,
                                   speckleWindowSize=100,
                                   speckleRange=32)
    disp = stereo.compute(left, right).astype(np.float32)/16.0
    new_disp = (disp-min_disp)/num_disp
    # Find the first valid column of row 0, to skip the invalid left border.
    # BUG FIX: iterate over columns (shape[1]); the original used shape[0]
    # (the row count) as the bound while indexing columns of row 0.
    count = 0
    for j in range(new_disp.shape[1]):
        if new_disp[0, j] > 0:
            count = j
            break
    new_disp_pp = np.zeros_like(new_disp)
    for i in range(new_disp.shape[0]):
        for j in range(count, new_disp.shape[1]-1):
            if new_disp[i, j] <= 0:
                # Fill holes with the mean of the values just outside the
                # nearest positive neighbours found by get_f_r.
                lef, righ = get_f_r(j, new_disp[i])
                new_disp_pp[i, j] = (new_disp[i, lef-1]+new_disp[i, righ+1])/2
            else:
                new_disp_pp[i, j] = new_disp[i, j]
    # BUG FIX: cv2.medianBlur only supports ksize > 5 for 8-bit images; for
    # float32 input the aperture must be 3 or 5 (the original 7 raised).
    new_disp_pp = cv2.medianBlur(new_disp_pp, 5)
    print(new_disp)
    print(new_disp_pp)
    cv2.imshow('org_img', left)
    cv2.imshow('org_disp', new_disp)
    cv2.imshow('disp_pp', new_disp_pp)
    cv2.waitKey()
    cv2.destroyAllWindows()
    return new_disp_pp

def reproject(disparity, q):
    """Back-project *disparity* to 3-D points with reprojection matrix *q*.

    Returns an (H, W, 3) array of 3-D coordinates (cv2.reprojectImageTo3D).
    """
    rows, cols = disparity.shape[0], disparity.shape[1]
    points = cv2.reprojectImageTo3D(disparity, q)
    return points.reshape(rows, cols, 3)

def simplify_points(contours, min_area=10, lengCoeff=0.02):
    """Drop contours whose convex-hull area is below *min_area*.

    Returns (kept_contours, bboxes) where *bboxes* is an ndarray of
    cv2.boundingRect tuples aligned with *kept_contours*.  *lengCoeff* is
    retained for interface compatibility; polygon approximation is currently
    disabled.
    """
    kept = []
    bboxes = []
    for cnt in contours:
        if convex_area(cnt) < min_area:
            continue
        kept.append(cnt)
        bboxes.append(cv2.boundingRect(cnt))
    return kept, np.array(bboxes)

def specialRegion_cmp(a, b):
    """cmp-style comparator ordering regions by their bounding box's y1 value
    (``bbox[1]``).  Returns -1, 0 or 1."""
    ay, by = a.bbox[1], b.bbox[1]
    return (ay > by) - (ay < by)

def showContourAndBBox(pic, contours, bboxes, filename=None):
    """Draw *contours* (blue) and indexed *bboxes* (red, with green index
    labels) over a grayscale image, show it in a uniquely-named window, and
    optionally save it to *filename*."""
    canvas = cv2.cvtColor(pic, cv2.COLOR_GRAY2BGR)
    cv2.drawContours(canvas, contours, -1, (255, 0, 0))
    for idx, bbox in enumerate(bboxes):
        x, y, w, h = bbox[0], bbox[1], bbox[2], bbox[3]
        cv2.rectangle(canvas, (x, y), (x + w, y + h), (0, 0, 255))
        cv2.putText(canvas, str(idx), (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0))
    # Window name from the clock so repeated calls open distinct windows.
    cv2.imshow(str(time.time()), canvas)
    if filename is not None:
        cv2.imwrite(filename, canvas)

def tellw_stereoMatch2(left, right, left_ignore=140, right_ignore=140, float_height=25, bVisualized=False, bPointMatched=False):
    """Contour-based stereo matcher for binarised single-channel image pairs.

    Pipeline: find contours in both images (skipping *left_ignore* columns at
    the left image's left edge and *right_ignore* columns at the right image's
    right edge), drop tiny contours, NMS-deduplicate their bounding boxes,
    then greedily pair left boxes with right boxes lying within *float_height*
    rows whose width/height/aspect agree.  When *bPointMatched* is set,
    contour pixels inside each paired box are matched to build a sparse
    disparity map.

    Returns (matched, approxes_left, approxes_right, approxes_left_bbox,
    approxes_right_bbox, disparity, right_matched, matched_all_left_points);
    disparity[y][x] is -1 where unmatched and -2 where a point match was
    attempted but rejected.
    """
    # bVisualized = True
    match_result = {}
    matched_all_left_points = [[] for i in range(left.shape[0])]
    assert left.shape == right.shape and len(left.shape) == 2, 'please get left and right with the same shape and the picture\'s channel has to be only one'
    img_shape = left.shape
    contours_left, hierarchy_left = cv2.findContours(left[:, left_ignore:], cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    contours_right, hierarchy_right = cv2.findContours(right[:, :img_shape[1] - right_ignore], cv2.RETR_TREE,
                                                       cv2.CHAIN_APPROX_SIMPLE)
    approxes_left, approxes_left_bbox = simplify_points(contours_left, min_area=30)
    # Shift left contours/boxes back into full-image coordinates (the crop
    # above removed the first left_ignore columns).
    for al in approxes_left:
        for i in range(al.shape[0]):
            al[i][0][0]+=left_ignore
    approxes_left_bbox[:, 0]+=left_ignore
    approxes_right, approxes_right_bbox = simplify_points(contours_right, min_area=30)
    # print(approxes_left_bbox, approxes_left_bbox.shape, type(approxes_left_bbox))
    # input()
    # print('nms begins')
    def simplify_nmsed_contours_bboxes(approxes_left_bbox, approxes_right_bbox, approxes_left, approxes_right):
        # Keep only NMS-surviving boxes and reorder the contour lists to match.
        nms_approxes_left_bbox_idx = nms(approxes_left_bbox)# after NMS the indices are ordered by the boxes' y1 value
        nms_approxes_right_bbox_idx = nms(approxes_right_bbox)
        # print('after nms using nms results')
        new_approxes_left = []
        for i in nms_approxes_left_bbox_idx:
            new_approxes_left.append(approxes_left[i])
        approxes_left = new_approxes_left
        new_approxes_right = []
        for i in nms_approxes_right_bbox_idx:
            new_approxes_right.append(approxes_right[i])
        approxes_right = new_approxes_right
        approxes_left_bbox = approxes_left_bbox[nms_approxes_left_bbox_idx]
        approxes_right_bbox = approxes_right_bbox[nms_approxes_right_bbox_idx]
        return approxes_left_bbox, approxes_right_bbox, approxes_left, approxes_right
    approxes_left_bbox, approxes_right_bbox, approxes_left, approxes_right = simplify_nmsed_contours_bboxes(approxes_left_bbox, approxes_right_bbox, approxes_left, approxes_right)
    # print('nms finished')
    matched = []
    left_position_rel = np.array([[]])
    right_position_rel = np.array([[]])
    to_del = []
    # input()
    # print('check begins')
    # For each left box, collect candidate right boxes on a similar row and
    # pre-compute a width/height/aspect disagreement score ('wh_gate').
    for i in range(len(approxes_left_bbox)):
        d= {}
        d['matchable_right_idx']=np.where((approxes_right_bbox[:, 1]>=approxes_left_bbox[i][1]-float_height)&(approxes_right_bbox[:, 1]<=approxes_left_bbox[i][1]+float_height))[0]
        if len(d['matchable_right_idx'])==0:
            to_del.append(i)
            continue
        d['match'] = -1
        # d['width_check']=np.abs(approxes_left_bbox[i][2]/approxes_right_bbox[d['matchable_right_idx'], 2]-1)
        # d['height_check']=np.abs(approxes_left_bbox[i][3]/approxes_right_bbox[d['matchable_right_idx'], 3]-1)
        # d['ratio_check'] = np.abs(approxes_left_bbox[i][2]*approxes_right_bbox[d['matchable_right_idx'], 3]/approxes_left_bbox[i][3]/approxes_right_bbox[d['matchable_right_idx'], 2]-1)
        # wh_gate = np.concatenate((d['width_check'][:, np.newaxis], d['height_check'][:, np.newaxis],
        #                           d['ratio_check'][:, np.newaxis]), axis=1)
        width_check = np.abs(approxes_left_bbox[i][2] / approxes_right_bbox[d['matchable_right_idx'], 2] - 1)
        height_check = np.abs(approxes_left_bbox[i][3] / approxes_right_bbox[d['matchable_right_idx'], 3] - 1)
        ratio_check = np.abs(
            approxes_left_bbox[i][2] * approxes_right_bbox[d['matchable_right_idx'], 3] / approxes_left_bbox[i][3] /
            approxes_right_bbox[d['matchable_right_idx'], 2] - 1)
        wh_gate = np.concatenate((width_check[:, np.newaxis], height_check[:, np.newaxis],
                                  ratio_check[:, np.newaxis]), axis=1)
        d['wh_gate'] = wh_gate.max(1)
        # Discard left boxes with no candidate passing a loose 0.5 gate.
        condition_idx = np.where(d['wh_gate'] < 0.5)[0]
        if len(condition_idx) == 0:
            to_del.append(i)
            continue
        matched.append(d)
    if len(to_del) > 0:
        approxes_left_bbox = np.delete(approxes_left_bbox, to_del, 0)
        # approxes_left_to_remove = []
        # print(approxes_left, type(approxes_left))
        # input()
        # for idx, i in enumerate(to_del):
        #     approxes_left_to_remove.append(approxes_left[i])
        # for altr in approxes_left_to_remove:
        #     print(i, idx, i-idx, approxes_left[i-idx])
        #     approxes_left.remove(approxes_left[i-idx])
        new_approxes_left = []
        for i in range(len(approxes_left)):
            if i not in to_del:
                new_approxes_left.append(approxes_left[i])
        approxes_left = new_approxes_left
    if bVisualized:
        showContourAndBBox(left, approxes_left, approxes_left_bbox)
        showContourAndBBox(right, approxes_right, approxes_right_bbox)
        print(matched)
        cv2.waitKey()
    count = 0
    right_bbox_matched_state=[0 for i in range(len(approxes_right_bbox))]
    # print('match begins')
    # Up to six greedy passes with a size gate loosened each pass
    # (0.05 * pass number).  Once at least one pair exists, candidates must
    # also agree with the relative-position layout of matched boxes.
    for i in range(6):
        if len(approxes_left_bbox)-count==0:
            break
        for j in range(len(approxes_left_bbox)):
            if matched[j]['match']!=-1:
                continue
            bMatchedSuccessfully = False
            # matchable_right_idx_len = len(matched[j]['matcheble_right_idx'])
            condition_idx = np.where(matched[j]['wh_gate'] < 0.05 * (i + 1))[0]
            if left_position_rel.size == 0:
                # No anchor pair yet: accept only an unambiguous single candidate.
                if len(condition_idx) == 1:
                    bMatchedSuccessfully = True
            else:
                # available_right_idx = []
                # distance_vs = []
                dis = 1000000000
                for ci in condition_idx:
                    if not right_bbox_matched_state[matched[j]['matchable_right_idx'][ci]]:
                        right_responding_rel = right_position_rel[matched[j]['matchable_right_idx'][ci]]
                        if ((right_responding_rel*left_position_rel[j])>=0).all():
                            distance_v = np.abs(left_position_rel[j]-right_responding_rel)
                            # if len(np.where(distance_v<20)[0]) >count and np.max(distance_v)<50:
                            if np.max(distance_v) < 50:
                                # available_right_idx.append(matched[j]['matchable_right_idx'][ci])
                                # distance_vs.append(distance_v)
                                new_dis = np.sqrt(np.sum(np.square(distance_v)))
                                # if 1.2*new_dis < max(dis, new_dis):
                                #     dis = new_dis
                                #     condition_idx=ci
                                # else:
                                #     bMatchedSuccessfully = False
                                #     break
                                if new_dis < dis:
                                    dis = new_dis
                                    condition_idx = [ci]
                                    bMatchedSuccessfully = True
            if bMatchedSuccessfully:
                # print(matched[j]['matchable_right_idx'], condition_idx)
                matchable_right_i = matched[j]['matchable_right_idx'][condition_idx][0]
                matched[j]['match'] = matchable_right_i
                right_bbox_matched_state[matchable_right_i]=1
                count+=1
                if count<6:
                    # Record every box's offset from this newly matched pair so
                    # later candidates can be checked for layout consistency.
                    if left_position_rel.size == 0:
                        # print(approxes_left_bbox, approxes_left_bbox[j][0], approxes_left_bbox[j][1])
                        left_x_distance = (approxes_left_bbox[:, 0]-approxes_left_bbox[j][0])[:, np.newaxis]
                        left_y_distance = (approxes_left_bbox[:, 1]-approxes_left_bbox[j][1])[:, np.newaxis]
                        # print(approxes_right_bbox[matchable_right_i], matchable_right_i)
                        # print(approxes_right_bbox[:, 0])
                        # print(approxes_right_bbox[matchable_right_i][0][0])
                        right_x_distance = (approxes_right_bbox[:, 0]-approxes_right_bbox[matchable_right_i][0])[:, np.newaxis]
                        right_y_distance = (approxes_right_bbox[:, 1]-approxes_right_bbox[matchable_right_i][1])[:, np.newaxis]
                        left_position_rel=np.concatenate((left_x_distance, left_y_distance), axis=1)
                        right_position_rel = np.concatenate((right_x_distance, right_y_distance), axis=1)
                    else:
                        left_x_distance = (approxes_left_bbox[:, 0] - approxes_left_bbox[j][0])[:, np.newaxis]
                        left_y_distance = (approxes_left_bbox[:, 1] - approxes_left_bbox[j][1])[:, np.newaxis]
                        right_x_distance = (approxes_right_bbox[:, 0] - approxes_right_bbox[matchable_right_i][0])[:,
                                           np.newaxis]
                        right_y_distance = (approxes_right_bbox[:, 1] - approxes_right_bbox[matchable_right_i][1])[:,
                                           np.newaxis]
                        left_position_rel = np.concatenate((left_position_rel, left_x_distance, left_y_distance), axis=1)
                        right_position_rel = np.concatenate((right_position_rel, right_x_distance, right_y_distance), axis=1)
    # print(img_shape)
    disparity = [[-1 for i in range(img_shape[1])]for i in range(img_shape[0])]
    right_matched = [[-1 for i in range(img_shape[1])] for i in range(img_shape[0])]
    right_pix_matched_state=[[0 for i in range(img_shape[1])] for i in range(img_shape[0])]
    # print(len(disparity), len(disparity[0]))
    if bPointMatched:
        # Pixel-level matching inside each matched box pair, smallest boxes first.
        area=approxes_left_bbox[:, 2]*approxes_left_bbox[:, 3]
        area_sorted_idx=area.argsort()
        print(area_sorted_idx)
        for i in area_sorted_idx:
            matched_right_i = matched[i]['match']
            if matched_right_i != -1:
                left_target = left[approxes_left_bbox[i][1]:approxes_left_bbox[i][1]+approxes_left_bbox[i][3], approxes_left_bbox[i][0]:approxes_left_bbox[i][0]+approxes_left_bbox[i][2]]
                right_target = right[approxes_right_bbox[matched_right_i][1]:approxes_right_bbox[matched_right_i][1]+approxes_right_bbox[matched_right_i][3], approxes_right_bbox[matched_right_i][0]:approxes_right_bbox[matched_right_i][0]+approxes_right_bbox[matched_right_i][2]]
                contours_left_target, hierarchy_left_target = cv2.findContours(left_target, cv2.RETR_TREE,
                                                                               cv2.CHAIN_APPROX_NONE)
                contours_right_target, hierarchy_right_target = cv2.findContours(right_target, cv2.RETR_TREE,
                                                                                 cv2.CHAIN_APPROX_NONE)
                # One-pixel zero border so the 8-neighbour lookups below cannot
                # run off the patch edge.
                left_target = np.pad(left_target, ((1, 1), (1, 1)), 'constant', constant_values=(0, 0))
                right_target = np.pad(right_target, ((1, 1), (1, 1)), 'constant', constant_values=(0, 0))

                # _, left_img = cv2.threshold(left_target, 128, 1, cv2.THRESH_BINARY)
                # _, right_img = cv2.threshold(right_target, 128, 1, cv2.THRESH_BINARY)
                # left_img = thinImage(left_img)
                # right_img = thinImage(right_img)
                # left_img[left_img == 1] = 255
                # right_img[right_img == 1] = 255
                # left_lines, left_flags = findLines(left_img)
                # right_lines, right_flags = findLines(right_img)
                bboxes_left_target = []
                bboxes_right_target = []
                for clt in contours_left_target:
                    bboxes_left_target.append(cv2.boundingRect(clt))
                bboxes_left_target = np.array(bboxes_left_target)
                for crt in contours_right_target:
                    bboxes_right_target.append(cv2.boundingRect(crt))
                bboxes_right_target = np.array(bboxes_right_target)
                bboxes_left_target, bboxes_right_target, contours_left_target, contours_right_target = simplify_nmsed_contours_bboxes(bboxes_left_target, bboxes_right_target, contours_left_target, contours_right_target)
                # print(hierarchy_left_target, hierarchy_right_target)
                # showContourAndBBox(left_target, [contours_left_target[0]], [], '1.jpg')
                # showContourAndBBox(left_target, [contours_left_target[1]], [], '2.jpg')
                # showContourAndBBox(right_target, [contours_right_target[0]], [], '3.jpg')
                # showContourAndBBox(right_target, [contours_right_target[1]], [], '4.jpg')
                # cv2.waitKey()
                target_similarity = cv2.matchShapes(left_target, right_target, cv2.CONTOURS_MATCH_I2, 0)
                # print(str(target_similarity))
                #if target_similarity <0.05:# and len(contours_left_target)==len(contours_right_target):
                    # for i in range(len(contours_left_target)):# direct contour correspondence does not work (checked visually)
                    #     print(contours_left_target[i], contours_right_target[i])
                # Bucket contour-pixel x coordinates by row (y) for both patches.
                dict_contours_left_target={}
                dict_contours_right_target={}
                for clt in contours_left_target:
                    for clt_point in clt:
                        if not clt_point[0][1] in dict_contours_left_target.keys():
                            dict_contours_left_target[clt_point[0][1]]  = []
                        dict_contours_left_target[clt_point[0][1]].append(clt_point[0][0])
                for crt in contours_right_target:
                    for crt_point in crt:
                        if not crt_point[0][1] in dict_contours_right_target.keys():
                            dict_contours_right_target[crt_point[0][1]]  = []
                        dict_contours_right_target[crt_point[0][1]].append(crt_point[0][0])
                dict_contours_left_target_sorted_keys = sorted(dict_contours_left_target.keys())
                dict_contours_right_target_sorted_keys = sorted(dict_contours_right_target.keys())
                # 8-neighbourhood occupancy vector (1 = neighbour pixel is 255)
                # per contour pixel; used as a cheap local shape signature.
                # NOTE(review): assumes binary images with foreground == 255 — confirm.
                eight_neighbor_vec_left_contour_points = {}
                eight_neighbor_vec_right_contour_points = {}
                eight_neighbor_pos = [[-1, -1], [0, -1], [1, -1], [-1, 0], [1, 0], [-1, 1], [0, 1], [1, 1]]
                for y in dict_contours_left_target:
                    dict_contours_left_target[y].sort()
                    eight_neighbor_vec_left_contour_points[y]=[]
                    for idx, dcltx in enumerate(dict_contours_left_target[y]):
                        eight_neighbor_vec_left_contour_points[y].append([])
                        for ii in range(8):
                            if left_target[y+eight_neighbor_pos[ii][1], dcltx+eight_neighbor_pos[ii][0]]==255:
                                eight_neighbor_vec_left_contour_points[y][idx].append(1)
                            else:
                                eight_neighbor_vec_left_contour_points[y][idx].append(0)
                for y in dict_contours_right_target:
                    dict_contours_right_target[y].sort()
                    eight_neighbor_vec_right_contour_points[y] = []
                    for idx, dcrtx in enumerate(dict_contours_right_target[y]):
                        eight_neighbor_vec_right_contour_points[y].append([])
                        for ii in range(8):
                            if right_target[y + eight_neighbor_pos[ii][1], dcrtx + eight_neighbor_pos[ii][0]] == 255:
                                eight_neighbor_vec_right_contour_points[y][idx].append(1)
                            else:
                                eight_neighbor_vec_right_contour_points[y][idx].append(0)
                calculated_right_y = []
                for y in dict_contours_left_target_sorted_keys:
                    # print(i, matched_right_i)
                    # Map the left row to a right row proportionally to the box heights.
                    right_y=int(y/approxes_left_bbox[i, 3]*approxes_right_bbox[matched_right_i, 3])
                    if right_y in calculated_right_y or right_y not in dict_contours_right_target_sorted_keys or right_y+approxes_right_bbox[matched_right_i, 1]>=img_shape[0]:
                        continue
                    calculated_right_y.append(right_y)
                    # print(y, right_y)
                    if target_similarity <0.05 and judge_sorted_list_bsimilar(dict_contours_left_target[y], dict_contours_right_target[right_y], approxes_left_bbox[i, 2],approxes_right_bbox[matched_right_i, 2]):
                        # Similar rows: pair pixels one-to-one in sorted-x order.
                        for ij in range(len(dict_contours_left_target[y])):
                            matched_left_x = approxes_left_bbox[i, 0]+dict_contours_left_target[y][ij]
                            matched_left_y = approxes_left_bbox[i, 1]+y
                            matched_right_x = approxes_right_bbox[matched_right_i, 0]+dict_contours_right_target[right_y][ij]
                            matched_right_y = approxes_right_bbox[matched_right_i, 1]+right_y
                            right_matched[matched_left_y][matched_left_x]=(matched_right_x, matched_right_y)
                            disparity[matched_left_y][matched_left_x] = matched_left_x-matched_right_x
                            matched_all_left_points[matched_left_y].append(matched_left_x)
                            right_pix_matched_state[matched_right_y][matched_right_x]=1
                    else:
                        # Dissimilar rows: match each left pixel to the best
                        # right pixel by relative-x plus neighbourhood distance.
                        for lidx, dcltx in enumerate(dict_contours_left_target[y]):
                            dis = 1000
                            matched_right_cx = -1
                            for ridx, dcrtx in enumerate(dict_contours_right_target[right_y]):
                                # print(matched_right_i, right_y, dcrtx)
                                # print(approxes_right_bbox[matched_right_i, 1])
                                if dcrtx-dcltx<-40 or right_pix_matched_state[approxes_right_bbox[matched_right_i, 1]+right_y][approxes_right_bbox[matched_right_i, 0]+dcrtx]:
                                    continue
                                elif dcrtx-dcltx>40:
                                    break
                                else:
                                    # NOTE(review): 'approxes_right_bbox[i, 2]' below looks like it
                                    # should be approxes_left_bbox[i, 2] (left box width) — verify.
                                    new_dis=20*(dcrtx/approxes_right_bbox[matched_right_i, 2]-dcltx/approxes_right_bbox[i, 2])**2
                                    for ii in range(8):
                                        new_dis+=abs(eight_neighbor_vec_left_contour_points[y][lidx][ii]-eight_neighbor_vec_right_contour_points[right_y][ridx][ii])
                                    if new_dis < dis:
                                        dis = new_dis
                                        matched_right_cx=dcrtx
                            matched_left_x = approxes_left_bbox[i, 0] + dcltx
                            matched_left_y = approxes_left_bbox[i, 1] + y
                            if dis < 5.5:
                                matched_right_x = approxes_right_bbox[matched_right_i, 0] + matched_right_cx
                                matched_right_y = approxes_right_bbox[matched_right_i, 1] + right_y
                                # print(matched_left_x, matched_left_y)
                                right_matched[matched_left_y][matched_left_x] = (matched_right_x, matched_right_y)
                                disparity[matched_left_y][matched_left_x] = matched_left_x - matched_right_x
                                right_pix_matched_state[matched_right_y][matched_right_x] = 1
                                matched_all_left_points[matched_left_y].append(matched_left_x)
                            else:
                                # print(matched_left_y, matched_left_x)
                                disparity[matched_left_y][matched_left_x] = -2
    return matched, approxes_left, approxes_right, approxes_left_bbox, approxes_right_bbox, disparity, right_matched, matched_all_left_points

def custom_3d_reconstruction(q, disparity,left_add=0, top_add=0):
    """Reproject every pixel with a positive disparity into 3-D space.

    Each matched pixel is lifted to the homogeneous vector
    (x + left_add, y + top_add, disparity, 1), multiplied by the 4x4
    reprojection matrix ``q`` and dehomogenized.

    Args:
        q: 4x4 reprojection matrix (e.g. the Q matrix from stereo rectification).
        disparity: 2-D list; entries <= 0 mean "no match".
        left_add, top_add: offsets restoring ROI coordinates to full-image ones.

    Returns:
        2-D list of the same shape; matched cells hold [X, Y, Z], others -1.
    """
    rows = len(disparity)
    cols = len(disparity[0])
    construction = [[-1] * cols for _ in range(rows)]
    for row, disp_row in enumerate(disparity):
        for col, disp in enumerate(disp_row):
            if disp <= 0:
                continue
            homo = q.dot(np.array([col + left_add, row + top_add, disp, 1]))
            w = homo[3]
            construction[row][col] = [homo[0] / w, homo[1] / w, homo[2] / w]
    return construction

def tellw_stereoMatch1(left, right, left_ignore=140, right_ignore=140, float_height=25):
    """Match contour regions between two rectified binary images.

    Contours are extracted from both images (skipping ``left_ignore`` columns
    at the left edge of ``left`` and ``right_ignore`` columns at the right edge
    of ``right``), simplified, de-duplicated with NMS, wrapped in SpecialRegion
    objects, and matched region-to-region: a left region is compared against
    right regions whose bounding-box top edge lies within ``float_height`` rows
    of its own.

    Args:
        left, right: single-channel images of identical shape.
        left_ignore, right_ignore: unusable border widths in pixels.
        float_height: vertical tolerance (rows) for candidate right regions.

    Returns:
        (match_result, left_srs, right_srs) where ``match_result`` maps
        'l<idx>' -> 'r<idx>' for every matched pair.
    """
    match_result = {}
    assert left.shape == right.shape and len(left.shape) == 2, 'please get left and right with the same shape and the picture\'s channel has to be only one'
    img_shape = left.shape
    contours_left, hierarchy_left = cv2.findContours(left[:, left_ignore:], cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    contours_right, hierarchy_right = cv2.findContours(right[:, :img_shape[1] - right_ignore], cv2.RETR_TREE,
                                                       cv2.CHAIN_APPROX_SIMPLE)
    approxes_left, approxes_left_bbox = simplify_points(contours_left, min_area=30)
    # shift left contour x-coordinates back into full-image coordinates
    for al in approxes_left:
        for i in range(al.shape[0]):
            al[i][0][0] += left_ignore
    approxes_right, approxes_right_bbox = simplify_points(contours_right, min_area=30)
    # after NMS the surviving contour indices are sorted by bbox y1
    nms_approxes_left_bbox_idx = nms(approxes_left_bbox)
    nms_approxes_right_bbox_idx = nms(approxes_right_bbox)
    approxes_left = [approxes_left[i] for i in nms_approxes_left_bbox_idx]
    approxes_right = [approxes_right[i] for i in nms_approxes_right_bbox_idx]
    left_srs = [SpecialRegion(al) for al in approxes_left]
    right_srs = [SpecialRegion(ar) for ar in approxes_right]
    current_rsr_index = 0
    for idx, lsr in enumerate(left_srs):
        # Skip right regions that lie entirely above the search band.
        # Bounds-checked: the original could index past the end of right_srs
        # (IndexError when all remaining right regions are above the band,
        # or when right_srs is empty).
        while current_rsr_index < len(right_srs) and right_srs[current_rsr_index].bbox[1] < lsr.bbox[1] - float_height:
            current_rsr_index += 1
        rsr_increment = 0
        while current_rsr_index + rsr_increment < len(right_srs) and abs(right_srs[current_rsr_index + rsr_increment].bbox[1] - lsr.bbox[1]) < float_height:
            if lsr.judge_matchSR(right_srs[current_rsr_index + rsr_increment], thresh=0.02):
                match_result['l' + str(idx)] = 'r' + str(current_rsr_index + rsr_increment)
                break
            rsr_increment += 1
    return match_result, left_srs, right_srs

def tellw_stereoMatch(left, right, left_src, right_src, left_ignore=140, right_ignore=160, win_size=7, float_height=3):
    """Match individual contour points between rectified left/right images.

    For each contour point in the left edge image, candidate right-image
    contour points (left of it, within ``float_height`` rows, not yet matched)
    are scored by windowed cross-correlation computed twice: once on the edge
    images (``left``/``right``) and once on the source images
    (``left_src``/``right_src``). A candidate is accepted only when both
    scores select the same point.

    Args:
        left, right: binary edge images of identical shape.
        left_src, right_src: corresponding source images (2-D or 3-D arrays).
        left_ignore: columns at the left edge of ``left`` to skip.
        right_ignore: columns at the right edge of ``right`` to skip.
        win_size: side length of the correlation window (odd).
        float_height: vertical search band, in rows.

    Returns:
        (match_points, approxes_left, approxes_right, disparity,
        matched_all_left_points): per-pixel matched right coordinate (or -1),
        simplified contours for both images, per-pixel disparity (-1 when
        unmatched), and matched left x-coordinates grouped by row.
    """
    assert left.shape == right.shape, 'please get left and right with the same shape'
    img_shape = left.shape[:2]
    # half window size used both for padding and for correlation offsets
    neighbors = win_size//2
    matched_all_left_points = [[] for i in range(left.shape[0])]
    # contours from the usable horizontal band of each image
    contours_left, hierarchy_left = cv2.findContours(left[:, left_ignore:], cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    contours_right, hierarchy_right = cv2.findContours(right[:, :img_shape[1]-right_ignore], cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    approxes_left, _ = simplify_points(contours_left, min_area=30)
    # shift left contour x-coordinates back into full-image coordinates
    for al in approxes_left:
        for i in range(al.shape[0]):
            al[i][0][0]+=left_ignore
    approxes_right, _ = simplify_points(contours_right, min_area=30)
    # contour point x-coordinates, bucketed per image row
    keypoints_left = {}
    keypoints_right = {}
    # match_points[i][j]: (x, y) of matched right point, or -1 when unmatched
    match_points = [[-1 for i in range(img_shape[1])] for i in range(img_shape[0])]
    # match_points_right[k][l] == 0 marks right point (l, k) as consumed
    match_points_right = [[-1 for i in range(img_shape[1])] for i in range(img_shape[0])]
    disparity = [[-1 for i in range(img_shape[1])] for i in range(img_shape[0])]
    # right_cor_max = [[-1 for i in range(img_shape[1])] for i in range(img_shape[0])]
    # right_cor_max_src = [[-1 for i in range(img_shape[1])] for i in range(img_shape[0])]
    for i in range(img_shape[0]):
        keypoints_left[i] = []
        keypoints_right[i] = []
    for cl in approxes_left:
        for i in range(cl.shape[0]):
            # drop points inside the ignored left border
            if cl[i][0][0] < left_ignore:
                continue
            keypoints_left[cl[i][0][1]].append(cl[i][0][0])
    for cr in approxes_right:
        for i in range(cr.shape[0]):
            # drop points inside the ignored right border
            if cr[i][0][0] > img_shape[1]-right_ignore:
                continue
            keypoints_right[cr[i][0][1]].append(cr[i][0][0])
    # sort each row's keypoints left-to-right so matches stay ordered
    for i in range(img_shape[0]):
        keypoints_left[i].sort()
        keypoints_right[i].sort()
    # zero-pad so the correlation window stays inside the arrays at the borders
    new_left = np.pad(left, ((neighbors, neighbors), (neighbors, neighbors)), 'constant', constant_values=(0, 0))
    new_right = np.pad(right, ((neighbors, neighbors), (neighbors, neighbors)), 'constant', constant_values=(0, 0))
    if len(left_src.shape) == 2:
        new_left_src = np.pad(left_src, ((neighbors, neighbors), (neighbors, neighbors)), 'constant', constant_values=(0, 0))
        new_right_src = np.pad(right_src, ((neighbors, neighbors), (neighbors, neighbors)), 'constant', constant_values=(0, 0))
    elif len(left_src.shape) == 3:
        new_left_src = np.pad(left_src, ((neighbors, neighbors), (neighbors, neighbors), (0,0)), 'constant',
                              constant_values=0)
        new_right_src = np.pad(right_src, ((neighbors, neighbors), (neighbors, neighbors), (0,0)), 'constant',
                               constant_values=0)
    for i in range(img_shape[0]):
        # matches along a row must be ordered: candidates left of the
        # previously matched right x are skipped
        last_left_match = 0
        for j in keypoints_left[i]:
            current_max = 0
            target_match = 0
            current_max_src = 0
            target_match_src = 0
            right_match_points = []
            # gather unconsumed right candidates within the vertical band
            for k in range(0 if i-float_height<0 else i-float_height, img_shape[0] if i+float_height>img_shape[0] else i+float_height):
                for l in keypoints_right[k]:
                    if l > j or l < last_left_match or match_points_right[k][l]==0:
                        continue
                    right_match_points.append((l, k))
            if len(right_match_points) == 0:
                continue
            for rmp in right_match_points:
                correlation_v_src = 0
                correlation_v = 0
                # windowed cross-correlation on both edge and source images
                # NOTE(review): int(new_left_src[...]) assumes the *_src images
                # are single-channel here; a 3-channel src would make int()
                # fail on an array — confirm callers pass grayscale sources.
                for u in range(0, 2*neighbors+1):
                    for w in range(0, 2*neighbors+1):
                        correlation_v += int(new_left[i+u][j+w])*int(new_right[rmp[1]+u][rmp[0]+w])
                        # for m in range(3):
                        #     correlation_v_src += int(new_left_src[i+k][j+l][m])*int(new_right_src[rmp[1]+k][rmp[0]+l][m])
                        correlation_v_src += int(new_left_src[i + u][j + w]) * int(
                            new_right_src[rmp[1] + u][rmp[0] + w])
                if correlation_v_src > current_max_src:
                    current_max_src = correlation_v_src
                    target_match_src = rmp
                if correlation_v > current_max:
                    current_max = correlation_v
                    target_match = rmp
            # accept only when both correlation scores agree on the same point
            if isinstance(target_match, tuple) and operator.eq(target_match, target_match_src) == True:
                match_points[i][j] =target_match
                last_left_match = target_match[0]
                # mark the right point as consumed
                match_points_right[target_match[1]][target_match[0]]=0
                disparity[i][j] = j-target_match[0]
                matched_all_left_points[i].append(j)
    # print(contours_left)
    # print(contours_right)
    # print(hierarchy_left, hierarchy_left.shape)
    # print(hierarchy_right, hierarchy_right.shape)
    return match_points, approxes_left, approxes_right, disparity, matched_all_left_points

def drawContourPoints(img, contours, density=0.5):
    """Scatter small circles on a random subset of contour points.

    Each contour gets its own shade of green; ``density`` is the probability
    that any individual point is drawn. Draws in place and returns ``img``.
    """
    for idx, contour in enumerate(contours):
        color = (0, (255 + idx * 20) % 256, 0)
        for point in contour:
            if random.random() < density:
                cv2.circle(img, (point[0][0], point[0][1]), 3, color)
    return img

def morphologyContourDetect(img):
    """Show a morphological-gradient edge map and return it.

    Computes dilate(img) - erode(img) with a 3x3 rectangular kernel,
    thresholds the gradient at 30, displays the binary result in a window
    until a key is pressed, and returns the thresholded image. BGR input is
    converted to grayscale first.
    """
    if len(img.shape) == 3:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    gradient = cv2.absdiff(cv2.dilate(img, kernel), cv2.erode(img, kernel))
    _, binary = cv2.threshold(gradient, 30, 255, cv2.THRESH_BINARY)
    cv2.imshow('morphologyContourDetect', binary)
    cv2.waitKey(0)
    cv2.destroyWindow('morphologyContourDetect')
    return binary

def morphologyCornerDetect(img):
    """Detect corner-like points via asymmetric morphological residues.

    Two dilate/erode pipelines with complementary structuring elements
    (cross followed by an octagon vs. an x-shape followed by a full square)
    respond differently at corners; the absolute difference of their results,
    thresholded at 20, marks corner candidates, which are circled in blue on
    a BGR copy of the input.

    Args:
        img: grayscale or BGR image; BGR is converted to grayscale.

    Returns:
        BGR copy of the (grayscale) input with corner candidates circled.
    """
    if len(img.shape) == 3:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    kernel1 = cv2.getStructuringElement(cv2.MORPH_CROSS, (5, 5))
    # kernel2: 5x5 square with the four corner triples knocked off (octagon)
    kernel2 = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
    for r, c in ((0, 0), (0, 1), (1, 0), (4, 4), (4, 3), (3, 4),
                 (4, 0), (4, 1), (3, 0), (0, 3), (0, 4), (1, 4)):
        kernel2[r, c] = 0
    kernel3 = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
    # kernel4: diagonal x-shape
    kernel4 = np.zeros((5, 5), dtype=np.uint8)
    for r, c in ((0, 0), (0, 4), (1, 1), (1, 3), (2, 2),
                 (3, 1), (3, 3), (4, 0), (4, 4)):
        kernel4[r, c] = 1
    dilated1 = cv2.dilate(img, kernel1)
    eroded1 = cv2.erode(dilated1, kernel2)
    dilated2 = cv2.dilate(img, kernel4)
    eroded2 = cv2.erode(dilated2, kernel3)
    result = cv2.absdiff(eroded1, eroded2)
    retval, binary_result = cv2.threshold(result, 20, 255, cv2.THRESH_BINARY)
    img_copy = img.copy()
    img_copy = cv2.cvtColor(img_copy, cv2.COLOR_GRAY2BGR)
    # Vectorized pixel scan replaces the original per-index Python loop with
    # its column-major arithmetic (j // shape[0], j % shape[0]); same pixel
    # set is visited, but the intent (row, col) is now explicit and the scan
    # runs in C.
    rows, cols = np.nonzero(binary_result == 255)
    for r, c in zip(rows, cols):
        # cv2 point order is (x, y) = (col, row)
        cv2.circle(img_copy, (int(c), int(r)), 5, (255, 0, 0))
    return img_copy

def _thin_neighbors(dst, i, j, img_height, img_width):
    """Return the 8-neighborhood (p2..p9, clockwise from north) of (i, j),
    treating out-of-image pixels as 0."""
    p4 = 0 if j == img_width - 1 else dst[i, j + 1]
    p8 = 0 if j == 0 else dst[i, j - 1]
    p2 = 0 if i == 0 else dst[i - 1, j]
    p3 = 0 if i == 0 or j == img_width - 1 else dst[i - 1, j + 1]
    p9 = 0 if i == 0 or j == 0 else dst[i - 1, j - 1]
    p6 = 0 if i == img_height - 1 else dst[i + 1, j]
    p5 = 0 if i == img_height - 1 or j == img_width - 1 else dst[i + 1, j + 1]
    p7 = 0 if i == img_height - 1 or j == 0 else dst[i + 1, j - 1]
    return p2, p3, p4, p5, p6, p7, p8, p9

def _thin_collect(dst, img_height, img_width, first_subiter):
    """One Zhang-Suen sub-iteration: collect [i, j] of foreground pixels
    (value 1) that are deletable under the sub-iteration's rules.

    A pixel is deletable when its neighbor count B is in [2, 6], the number
    of 0->1 transitions A around it is exactly 1, and the directional
    products for the current sub-iteration are zero.
    """
    flags = []
    for i in range(img_height):
        for j in range(img_width):
            if dst[i, j] != 1:
                continue
            p2, p3, p4, p5, p6, p7, p8, p9 = _thin_neighbors(dst, i, j, img_height, img_width)
            b = p2 + p3 + p4 + p5 + p6 + p7 + p8 + p9
            if b < 2 or b > 6:
                continue
            # count 0 -> 1 transitions in the circular sequence p2..p9, p2
            seq = (p2, p3, p4, p5, p6, p7, p8, p9, p2)
            ap = sum(1 for a, nxt in zip(seq, seq[1:]) if a == 0 and nxt == 1)
            if ap != 1:
                continue
            if first_subiter:
                if p2 * p4 * p6 == 0 and p4 * p6 * p8 == 0:
                    flags.append([i, j])
            else:
                if p2 * p4 * p8 == 0 and p2 * p6 * p8 == 0:
                    flags.append([i, j])
    return flags

def thinImage(src, maxIterations=-1):
    """Zhang-Suen thinning of a binary image with foreground value 1.

    Refactored: the two nearly-identical ~40-line sub-iteration bodies of the
    original are factored into _thin_collect/_thin_neighbors; the iteration
    order, deletion rules and stopping conditions are unchanged.

    Args:
        src: 2-D array with foreground pixels equal to 1.
        maxIterations: stop after this many full iterations; -1 = until stable.

    Returns:
        A thinned copy of ``src`` (the input is not modified).
    """
    assert len(src.shape) == 2, 'please binarify pictures'
    img_height, img_width = src.shape
    dst = src.copy()
    count = 0
    while True:
        count += 1
        if maxIterations != -1 and count > maxIterations:
            break
        # first sub-iteration: remove south-east boundary / north-west corners
        mFlag = _thin_collect(dst, img_height, img_width, True)
        for i, j in mFlag:
            dst[i, j] = 0
        if not mFlag:
            break
        # second sub-iteration: remove north-west boundary / south-east corners
        mFlag = _thin_collect(dst, img_height, img_width, False)
        for i, j in mFlag:
            dst[i, j] = 0
        if not mFlag:
            break
    return dst

def findNextPoint(neighbor_points, image, inpoint, flag):
    """Find the next 255-valued pixel adjacent to ``inpoint``.

    The 8-neighborhood is probed in a zig-zag order starting from direction
    index ``flag``: flag, flag+1, flag-1, flag+2, flag-2, ... (mod 8), for at
    most 7 probes. On success the found pixel is cleared to 0 in ``image``.

    Returns:
        (success, (row, col), direction_index); ((0, 0), 0) when unsuccessful.
    """
    img_height, img_width = image.shape[:2]
    direction = flag
    for step in range(1, 8):
        r = inpoint[0] + neighbor_points[direction][0]
        c = inpoint[1] + neighbor_points[direction][1]
        if 0 <= r < img_height and 0 <= c < img_width and image[r, c] == 255:
            image[r, c] = 0
            return True, (r, c), direction
        # alternate sides of the starting direction, widening each time
        if step % 2:
            direction = (direction + step) % 8
        else:
            direction = (direction - step) % 8
    return False, (0, 0), 0
# NOTE: a point's coordinates here mean (row index, column index).
def findFirstPoint(inputimg):
    """Scan row-major for the first 255-valued pixel; clear it and return it.

    The found pixel is set to 0 in ``inputimg`` so repeated calls consume the
    image. Returns (success, (row, col)); (False, (0, 0)) when nothing is set.
    """
    height, width = inputimg.shape[:2]
    for r in range(height):
        for c in range(width):
            if inputimg[r, c] == 255:
                inputimg[r, c] = 0
                return True, (r, c)
    return False, (0, 0)

def findLines(inputimg):
    """Trace 8-connected curves in a binary image whose edge pixels are 255.

    Repeatedly seeds at the first remaining edge pixel (findFirstPoint),
    grows the curve in both directions with findNextPoint — visited pixels
    are zeroed, so the image is consumed — and keeps curves longer than 10
    points.

    Args:
        inputimg: binary image; modified in place (all traced pixels cleared).

    Returns:
        (lines, line_flags): list of deques of (row, col) points, and the
        matching list of deques of direction flags between consecutive points.
    """
    neighbor_points = [[-1, -1], [0, -1], [1, -1], [1, 0], [1, 1], [0, 1], [-1, 1], [-1, 0]]
    success, firstpoint = findFirstPoint(inputimg)
    lines = []
    line_flags = []
    while success:
        line = deque([firstpoint])
        line_flag = deque([])
        # grow forward from the seed
        this_point, this_flag = firstpoint, 0
        success, next_point, next_flag = findNextPoint(neighbor_points, inputimg, this_point, this_flag)
        while success:
            line.append(next_point)
            line_flag.append(next_flag)
            this_point, this_flag = next_point, next_flag
            success, next_point, next_flag = findNextPoint(neighbor_points, inputimg, this_point, this_flag)
        # grow backward from the seed (remaining pixels on the other side)
        this_point, this_flag = firstpoint, 0
        success, next_point, next_flag = findNextPoint(neighbor_points, inputimg, this_point, this_flag)
        while success:
            line.appendleft(next_point)
            line_flag.appendleft(next_flag)
            this_point, this_flag = next_point, next_flag
            success, next_point, next_flag = findNextPoint(neighbor_points, inputimg, this_point, this_flag)
        if len(line) > 10:
            lines.append(line)
            line_flags.append(line_flag)
        success, firstpoint = findFirstPoint(inputimg)
    # BUG FIX: the original returned the loop-local ``line_flag`` (only the
    # last traced curve's flags, and a NameError on an empty image) instead
    # of the accumulated ``line_flags`` list.
    return lines, line_flags

if __name__ == '__main__':
    # Demo / debug driver: rectify a hard-coded stereo pair, run contour-based
    # stereo matching, reconstruct matched pixels to 3-D, and show the result
    # in a small Qt viewer. Paths and parameters are local test values.
    from volumeMeasure import VolumeMeasure
    from SWQT1 import SWQT1
    startTime = time.time()
    vm = VolumeMeasure()
    cfg = ConfigParser()
    cfg.read('SWQT.ini')
    # load the raw left/right frames as grayscale
    leftpic = cv2.imread('C:/Users/tellw/Desktop/bishe/items/calibed_pics/20200326101821left/23.jpg', 0)
    rightpic = cv2.imread('C:/Users/tellw/Desktop/bishe/items/calibed_pics/20200326101821right/23.jpg', 0)
    rec_left = vm.calibrator.rectify_left(leftpic)
    rec_right = vm.calibrator.rectify_right(rightpic)
    #rec_left = morphologyContourDetect(rec_left)
    # mcdl = morphologyCornerDetect(rec_left)
    # mcdr = morphologyCornerDetect(rec_right)
    # cv2.imshow('mcdl', mcdl)
    # cv2.imshow('mcdr', mcdr)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()
    # sys.exit()
    # cv2.imshow('rec_left', rec_left)
    # cv2.imshow('rec_right', rec_right)
    # crop the valid region of interest remaining after rectification
    rec_left_roi_src = rec_left[25:455, 25:615]
    rec_right_roi_src = rec_right[25:455, 25:615]
    # SPengLiang_sgbm(rec_left_roi_src, rec_right_roi_src)
    # print('running %f s'%(time.time()-startTime))
    # sys.exit()
    # edge maps via someProcessDetailed (defined elsewhere in this file)
    rec_left_roi = someProcessDetailed(rec_left_roi_src, cfg=cfg, bGaussianBlur=1, bCanny=1)
    rec_right_roi = someProcessDetailed(rec_right_roi_src, cfg=cfg, bGaussianBlur=1, bCanny=1)
    # rec_left_roi = cv2.cvtColor(rec_left_roi, cv2.COLOR_BGR2GRAY)
    # rec_right_roi = cv2.cvtColor(rec_right_roi, cv2.COLOR_BGR2GRAY)
    # match_result, left_srs, right_srs = tellw_stereoMatch1(rec_left_roi, rec_right_roi)
    img_shape = rec_left_roi.shape[:2]
    # NOTE(review): tellw_stereoMatch2 is defined elsewhere in this file;
    # confirm it returns exactly these 7 values with bPointMatched=True.
    match_result, approxes_left, approxes_right, approxes_left_bbox, approxes_right_bbox, disparity, right_matched = tellw_stereoMatch2(rec_left_roi, rec_right_roi, bPointMatched=True)
    # surf_stereoMatch(rec_left_roi, rec_right_roi)
    # surf_stereoMatch1(rec_left_roi_src, rec_right_roi_src)
    # print('running %f s'%(time.time()-startTime))
    # sys.exit()
    # matchPoints, approxes_left, approxes_right = tellw_stereoMatch(rec_left_roi, rec_right_roi, rec_left_roi_src, rec_right_roi_src)
    # convert to BGR so colored overlays can be drawn
    rec_left_roi = cv2.cvtColor(rec_left_roi, cv2.COLOR_GRAY2BGR)
    rec_right_roi = cv2.cvtColor(rec_right_roi, cv2.COLOR_GRAY2BGR)
    # rec_left_roi = drawContourPoints(rec_left_roi, approxes_left, density=1)
    # rec_right_roi = drawContourPoints(rec_right_roi, approxes_right, density=1)
    # for i, al in enumerate(approxes_left):
    #     x, y, w, h = cv2.boundingRect(al)
    #     cv2.rectangle(rec_left_roi, (x, y), (x+w, y+h), (0, 255, 255))
    #     cv2.putText(rec_left_roi, str(i), (x, y), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 0))
    # for i, ar in enumerate(approxes_right):
    #     x, y, w, h = cv2.boundingRect(ar)
    #     cv2.rectangle(rec_right_roi, (x, y), (x+w, y+h), (0, 255, 255))
    #     cv2.putText(rec_right_roi, str(i), (x, y), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 0))
    # cv2.drawContours(rec_left_roi, contours_left, -1, (255, 0, 0))
    # cv2.drawContours(rec_right_roi, contours_right, -1, (255, 0, 0))
    # visualization code for tellw_stereoMatch1
    # for lkp, rkp in match_result.items():
    #     color = random.sample(range(256), 3)
    #     left_sr = left_srs[int(lkp[1:])]
    #     right_sr = right_srs[int(rkp[1:])]
    #     left_rect = left_sr.bbox
    #     right_rect = right_sr.bbox
    #     cv2.rectangle(rec_left_roi, (left_rect[0], left_rect[1]), (left_rect[0]+left_rect[2], left_rect[1]+left_rect[3]), color)
    #     cv2.rectangle(rec_right_roi, (right_rect[0], right_rect[1]), (right_rect[0]+right_rect[2], right_rect[1]+right_rect[3]), color)
    #     cv2.drawContours(rec_left_roi, [left_sr.contour], -1, color)
    #     cv2.drawContours(rec_right_roi, [right_sr.contour], -1, color)
    # reproject matched pixels to 3-D (25/25 restores full-image coordinates)
    construction = custom_3d_reconstruction(vm.calibrator.q, disparity, 25, 25)
    # visualization code for tellw_stereoMatch2
    for i, mr in enumerate(match_result):
        if mr['match'] != -1:
            # same random color for a matched left/right region pair
            color = random.sample(range(256), 3)
            cv2.rectangle(rec_left_roi, (approxes_left_bbox[i][0], approxes_left_bbox[i][1]),
                          (approxes_left_bbox[i][0] + approxes_left_bbox[i][2], approxes_left_bbox[i][1] + approxes_left_bbox[i][3]), color)
            cv2.rectangle(rec_right_roi, (approxes_right_bbox[mr['match']][0], approxes_right_bbox[mr['match']][1]), (approxes_right_bbox[mr['match']][0]+approxes_right_bbox[mr['match']][2], approxes_right_bbox[mr['match']][1]+approxes_right_bbox[mr['match']][3]), color)
            cv2.drawContours(rec_left_roi, [approxes_left[i]], -1, color)
            cv2.drawContours(rec_right_roi, [approxes_right[mr['match']]], -1, color)
    random_point = random.sample(range(img_shape[0]*img_shape[1]), 10000)
    # side-by-side view with horizontal epipolar guide lines every 40 rows
    hstack =np.hstack((rec_left_roi, rec_right_roi))
    # hstack =np.hstack((rec_left_roi_src, rec_right_roi_src))
    for i in range(20, 460, 40):
        cv2.line(hstack, (0, i), (1200, i), (0, 0, 255))
    # for random_p in random_point:
        #     #     point_x = random_p%img_shape[1]
        #     #     point_y = random_p//img_shape[1]
    # point-match visualization code for tellw_stereoMatch2
    # for point_y in range(img_shape[0]):
    #     for point_x in range(img_shape[1]):
    #         if isinstance(construction[point_y][point_x], list):
    #             if random.randint(0, 50)==0:
    #                 cv2.line(hstack, (point_x, point_y), (590+right_matched[point_y][point_x][0], right_matched[point_y][point_x][1]), (255, 255, 0))
    #                 cv2.putText(hstack,
    #                             '(%d, %d, %d)' % (construction[point_y][point_x][0], construction[point_y][point_x][1], construction[point_y][point_x][2]),
    #                             (point_x, point_y), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0))
    # visualization code for tellw_stereoMatch
    # for i in range(rec_right_roi.shape[0]):
    #     for j in range(rec_right_roi.shape[1]):
    #         if matchPoints[i][j] != 0:
    #             if random.randint(0, 20) == 0:
    #                 cv2.line(hstack, (j, i), (matchPoints[i][j][0]+590, matchPoints[i][j][1]), (255, 0, 0))
    # displ, dispr, true_displ = compute_disparity(rec_left_roi_src, rec_right_roi_src)
    #
    # img3d = reproject(true_displ, vm.calibrator.q)
    # for i in range(len(disparity)):
    #     for j in range(len(disparity[0])):
    #         if disparity[i][j] >=0:
    #             print(disparity[i][j])
    # sys.exit()
    print('running %f s'%(time.time()-startTime))
    # show disparity and reconstruction in the Qt widget
    a = QApplication([])
    #swqt1 = SWQT1(hstack, [displ, dispr, true_displ, img3d])
    swqt1 = SWQT1(hstack, [disparity, construction])
    swqt1.show()
    a.exec_()
    #cv2.waitKey()
    #cv2.destroyAllWindows()