from configparser import ConfigParser
import cv2
import numpy as np
import time
from utils import find_polys, compute_disparity, visualize_disparity, reproject, _distance, bounding_rect, geometry_via_contours, geometry_via_yolo, cal_new_disparity, S_for_poly, poly_rect
from yolo import YOLO
from vm_test import tellw_stereoMatch2, surf_stereoMatch, tellw_stereoMatch, custom_3d_reconstruction, simplify_points
import math
import random

class stereo_calibrator:
    """Stereo camera calibration / rectification helper.

    Holds the intrinsics, extrinsics and rectification results of a stereo
    rig.  Depending on ``initmode`` the parameters are either loaded from a
    previously saved OpenCV XML file (``'static'``), computed from chessboard
    image pairs (``'calib'``), or imported from a MATLAB calibration export
    (``'matlab_calib'``).
    """

    # Node names shared by load_params() / save_params() so that a
    # save -> load round trip is lossless.
    _PARAM_NODES = ('left_intrinsic', 'left_coeffs', 'right_intrinsic',
                    'right_coeffs', 'rotation', 'transform',
                    'r1', 'r2', 'p1', 'p2', 'q', 'roi1', 'roi2')

    def __init__(self, xmlfile, initmode):
        # Half-size of the sub-pixel corner refinement search window.
        self.subpix_window_size = [4, 4]
        # Minimum number of frames in which both chessboards must be found.
        self.min_good_images = 1
        self.left_intrinsic = np.zeros((3, 3))
        self.left_coeffs = np.zeros((1, 5))
        self.right_intrinsic = np.zeros((3, 3))
        self.right_coeffs = np.zeros((1, 5))
        self.rotation = np.zeros((3, 3))   # R: right camera w.r.t. left camera
        self.transform = np.zeros((3, 1))  # T: translation between the cameras
        self.r1 = np.zeros((3, 3))         # rectification rotations
        self.r2 = np.zeros((3, 3))
        self.p1 = np.zeros((3, 4))         # rectified projection matrices
        self.p2 = np.zeros((3, 4))
        self.q = np.zeros((4, 4))          # disparity-to-depth mapping matrix
        self.roi1 = np.zeros((4, 1))       # valid pixel ROIs after rectification
        self.roi2 = np.zeros((4, 1))
        if initmode == 'static':
            assert self.load_params(xmlfile), 'failed to open the parameter file'
        elif initmode == 'calib':
            assert self.chessboard_calibrate(xmlfile), 'error occurred on calibration'
        elif initmode == 'matlab_calib':
            assert self.matlab_calibrate(xmlfile), 'error occurred on calibration'
        else:
            # Previously this called input('mode not supported.'), which
            # silently blocked on stdin; an explicit error is correct here.
            raise ValueError('initmode %r not supported' % (initmode,))

    def load_params(self, xmlfile):
        """Load all calibration parameters from an OpenCV XML storage file.

        Returns True on success, False if the file could not be opened.
        """
        infile = cv2.FileStorage(xmlfile, cv2.FILE_STORAGE_READ)
        print('loading %s\'s params...'%xmlfile)
        if not infile.isOpened():
            return False
        # Bug fix: roi2 used to be read from the 'roi1' node (copy-paste typo).
        for name in self._PARAM_NODES:
            setattr(self, name, infile.getNode(name).mat())
        infile.release()
        return True

    def chessboard_calibrate(self, xmlfile):
        """Run a full stereo calibration from side-by-side chessboard images.

        The XML file lists the per-camera image size, the chessboard pattern
        (cols rows), the physical grid cell size, and a ';'-separated list of
        image paths; each image holds the left and right views concatenated
        horizontally.  Returns True on success.
        """
        storage = cv2.FileStorage(xmlfile, cv2.FILE_STORAGE_READ)
        if not storage.isOpened():
            return False
        image_size = [int(i) for i in storage.getNode('image_size').string().strip().split(' ')]
        # pattern is given as (inner corners per row, per column).
        pattern = [int(i) for i in storage.getNode('pattern').string().strip().split(' ')]
        grid_size = [float(i) for i in storage.getNode('grid_size').string().strip().split(' ')]
        path = storage.getNode('path').string().strip().split(';')
        storage.release()
        lefts = []
        rights = []
        for file in path:
            origin = cv2.imread(file, 0)
            # Each input frame is the two views stacked side by side.
            assert origin.shape[0] == image_size[1] and origin.shape[1] == image_size[0]*2
            lefts.append(origin[:, :image_size[0]])
            rights.append(origin[:, image_size[0]:])
        return self._calibrate(lefts, rights, image_size, pattern, grid_size, self.subpix_window_size)

    def _calibrate(self, lefts, rights, image_size, pattern, grid_size, win_size):
        """Calibrate each camera, then the stereo pair, then rectify.

        Frames where the chessboard is not found in both views are skipped;
        at least ``self.min_good_images`` usable frames are required.
        """
        assert len(lefts) != 0
        assert len(lefts) == len(rights)
        total_frames = len(lefts)
        count_good = 0
        left_corners = []
        right_corners = []
        for i in range(total_frames):
            retl, left_corner = cv2.findChessboardCorners(lefts[i], (pattern[0], pattern[1]), None)
            retr, right_corner = cv2.findChessboardCorners(rights[i], (pattern[0], pattern[1]), None)
            if not retl or not retr:
                print('Bad frames %d'%i)
                continue
            # Refine the detected corners to sub-pixel accuracy.
            retl, left_corner = cv2.find4QuadCornerSubpix(lefts[i], left_corner, win_size)
            retr, right_corner = cv2.find4QuadCornerSubpix(rights[i], right_corner, win_size)
            left_corners.append(left_corner)
            right_corners.append(right_corner)
            count_good+=1
        if count_good < self.min_good_images:
            return False
        # One copy of the ideal board coordinates per usable frame.
        corner_positions = self._cal_corner_positions(pattern, grid_size)
        corner_positions_vec = [corner_positions for i in range(count_good)]
        calib_left_ret, self.left_intrinsic, self.left_coeffs, left_rvecs, left_tvecs = cv2.calibrateCamera(corner_positions_vec, left_corners, image_size, self.left_intrinsic, self.left_coeffs)
        calib_right_ret, self.right_intrinsic, self.right_coeffs, right_rvecs, right_tvecs = cv2.calibrateCamera(corner_positions_vec, right_corners, image_size, self.right_intrinsic, self.right_coeffs)
        print('left calibration error: {}, right calibration error: {}'.format(calib_left_ret, calib_right_ret))
        stereo_calib_ret, self.left_intrinsic, self.left_coeffs, self.right_intrinsic, self.right_coeffs, self.rotation, self.transform, E, F = cv2.stereoCalibrate(corner_positions_vec, left_corners, right_corners, self.left_intrinsic, self.left_coeffs, self.right_intrinsic, self.right_coeffs, image_size)
        print('stereo calibration error: {}'.format(stereo_calib_ret))
        # Bug fix: cv2.stereoRectify returns 7 values (R1, R2, P1, P2, Q,
        # validPixROI1, validPixROI2); unpacking only 5 raised ValueError.
        self.r1, self.r2, self.p1, self.p2, self.q, self.roi1, self.roi2 = cv2.stereoRectify(self.left_intrinsic, self.left_coeffs, self.right_intrinsic, self.right_coeffs, image_size, self.rotation, self.transform)
        return True

    def matlab_calibrate(self, xmlfile):
        """Import camera parameters exported from MATLAB and rectify."""
        if not self.load_params_matlab(xmlfile):
            return False
        img_width = int(input('标定时设置的单张图片宽度：'))
        img_height = int(input('标定时设置的单张图片高度：'))
        # Bug fix: unpack all 7 return values of cv2.stereoRectify (see _calibrate).
        self.r1, self.r2, self.p1, self.p2, self.q, self.roi1, self.roi2 = cv2.stereoRectify(self.left_intrinsic, self.left_coeffs, self.right_intrinsic, self.right_coeffs, (img_width, img_height), self.rotation, self.transform)
        return True

    def load_params_matlab(self, xmlfile):
        """Load only the intrinsics/extrinsics (MATLAB export has no rectification)."""
        storage = cv2.FileStorage(xmlfile, cv2.FILE_STORAGE_READ)
        if not storage.isOpened():
            return False
        for name in ('left_intrinsic', 'left_coeffs', 'right_intrinsic',
                     'right_coeffs', 'rotation', 'transform'):
            setattr(self, name, storage.getNode(name).mat())
        storage.release()
        return True

    def rectify_left(self, left_img):
        """Return the rectified left image (same size as the input)."""
        new_size = (left_img.shape[1], left_img.shape[0])
        # NOTE(review): cv2.CV_8U (== 0) is not a documented m1type; OpenCV
        # appears to fall back to its default map type for values <= 0 — confirm.
        mapx, mapy = cv2.initUndistortRectifyMap(self.left_intrinsic, self.left_coeffs, self.r1, self.p1, new_size, cv2.CV_8U)
        rectified = cv2.remap(left_img, mapx, mapy, cv2.INTER_LINEAR)
        return rectified

    def rectify_right(self, right_img):
        """Return the rectified right image (same size as the input)."""
        new_size = (right_img.shape[1], right_img.shape[0])
        mapx, mapy = cv2.initUndistortRectifyMap(self.right_intrinsic, self.right_coeffs, self.r2, self.p2, new_size, cv2.CV_8U)
        rectified = cv2.remap(right_img, mapx, mapy, cv2.INTER_LINEAR)
        return rectified

    def save_params(self, xmlfile):
        """Persist every parameter that load_params() reads back.

        Returns True on success, False if the file could not be opened.
        """
        storage = cv2.FileStorage(xmlfile, cv2.FILE_STORAGE_WRITE+cv2.FILE_STORAGE_FORMAT_XML)
        if not storage.isOpened():
            return False
        # Consistency fix: roi1/roi2 were read by load_params() but never
        # written here, so a save -> load round trip lost them.
        for name in self._PARAM_NODES:
            storage.write(name, getattr(self, name))
        storage.release()
        return True

    def _cal_corner_positions(self, pattern, grid_size):
        """Build the ideal 3-D chessboard corner grid (z = 0 plane).

        x and y follow the image-coordinate convention: grid_size[0] is the
        cell length along x, grid_size[1] along y.  Returns an (N, 3) float32
        array, as required by cv2.calibrateCamera / cv2.stereoCalibrate
        (plain Python lists are rejected by the OpenCV bindings).
        """
        result = []
        for col in range(pattern[1]):
            for row in range(pattern[0]):
                result.append([col*grid_size[0], row*grid_size[1], 0])
        return np.array(result, dtype=np.float32)

class VolumeMeasure:
    """End-to-end stereo volume measurement pipeline.

    Reads all behaviour switches from ``SWQT.ini``, owns a
    ``stereo_calibrator`` (loaded from a saved parameter file) and a YOLO
    detector, and exposes ``detect()`` which takes a stereo image pair and
    draws/saves per-object volume estimates.
    """

    def __init__(self):
        # All configuration comes from the SWQT.ini file in the working directory.
        self.cfg = ConfigParser()
        self.cfg.read('SWQT.ini')
        self.bFromTop = self.cfg.getboolean('binocular_cameras', 'bFromTop')
        self.bVisualized = self.cfg.getboolean('binocular_cameras', 'bVisualized')
        self.bCollected = self.cfg.getboolean('binocular_cameras', 'bCollected')  # save input/result images
        self.bTimeRecorded = self.cfg.getboolean('binocular_cameras', 'bTimeRecorded')
        self.bmm = self.cfg.getboolean('binocular_cameras', 'bmm')  # presumably: units are millimetres — verify
        self.bAuto = self.cfg.getboolean('binocular_cameras', 'bAuto')  # auto-advance display instead of waiting for a key
        self.detectCount = self.cfg.getint('binocular_cameras', 'detectCount')  # running frame counter, persisted back to the ini
        self.xmlfile = self.cfg.get('binocular_cameras', 'xmlfile')  # stereo parameter file for the calibrator
        self.camera_height = self.cfg.getfloat('binocular_cameras', 'cameraToGroundHeight')
        self.processThing = self.cfg.get('binocular_cameras', 'processThing')  # polygon-finding strategy selector
        self.disparityStyle = self.cfg.get('binocular_cameras', 'disparityStyle')  # disparity/matching strategy selector
        self.constructionStyle = self.cfg.get('binocular_cameras', 'constructionStyle')
        self.sizeCalculation = self.cfg.get('binocular_cameras', 'sizeCalculation')
        self.picWidth = self.cfg.getint('binocular_cameras', 'picWidth')
        self.picHeight = self.cfg.getint('binocular_cameras', 'picHeight')
        self.calibrator = stereo_calibrator(self.xmlfile, 'static')
        # Showing intermediate windows is suppressed while timing runs.
        self.bShowed = self.bVisualized and not self.bTimeRecorded
        self.cut_rate = self.cfg.getfloat('binocular_cameras', 'cut_rate')  # fraction cropped from each border for the ROI
        self.yolo = YOLO()

    def detect(self, left, right, left_src, right_src, target_area=None):
        """Measure object volumes from one stereo frame.

        Pipeline: crop a central ROI -> find object polygons (contours,
        YOLO+geometry, or contour+geometry, per config) -> compute disparity
        (SGBM or one of several custom matchers) -> reproject to a 3-D point
        cloud -> compute per-polygon volume -> draw and persist results.

        Parameters
        ----------
        left, right : preprocessed (grayscale) stereo pair
        left_src, right_src : original source images of the same pair
        target_area : optional list of (label, contour) regions restricting
            the search; when None the whole ROI is processed
        Returns 0 on success, 1 when the input is not single-channel where a
        grayscale image is required.
        """
        bSGBM=False
        bAllReporj = False  # True when the whole disparity map is reprojected via Q
        if self.bCollected:
            # Archive the raw input pair for later offline runs.
            cv2.imwrite('collect/left/%d.jpg'%self.detectCount, left_src)
            cv2.imwrite('collect/right/%d.jpg'%self.detectCount, right_src)
        if self.bTimeRecorded:
            startTime = time.time()
        # rectified_left = self.calibrator.rectify_left(left)
        # rectified_right = self.calibrator.rectify_right(right)
        # rectified_left_src = self.calibrator.rectify_left(left_src)
        # rectified_right_src = self.calibrator.rectify_right(right_src)
        # if self.bShowed:
        #     cv2.imshow('rectified_l', rectified_left)
        #     cv2.imshow('rectified_r', rectified_right_src)
        #     cv2.waitKey()
        # Crop a centred ROI, trimming cut_rate of the frame on every side.
        roi_y1 = int(self.cut_rate*self.picHeight)
        roi_y2 = self.picHeight-roi_y1
        roi_x1 = int(self.cut_rate*self.picWidth)
        roi_x2 = self.picWidth-roi_x1
        left_roi = left[roi_y1:roi_y2, roi_x1:roi_x2]
        right_roi = right[roi_y1:roi_y2, roi_x1:roi_x2]
        left_src_roi = left_src[roi_y1:roi_y2, roi_x1:roi_x2]
        right_src_roi = right_src[roi_y1:roi_y2, roi_x1:roi_x2]
        img_shape = (roi_y2-roi_y1, roi_x2-roi_x1)  # (height, width) of the ROI
        if self.bShowed:
            cv2.imshow('l_roi', left_roi)
            cv2.imshow('r_roi', right_src_roi)
            cv2.waitKey()
        # --- Stage 1: locate candidate object polygons -----------------------
        if target_area:
            # Restrict polygon search to the supplied target regions
            # (each entry: [label, contour-like points]).
            # NOTE(review): concatenating onto an empty 1-D np.array([]) only
            # works if the helpers return compatible shapes — confirm.
            polys = np.array([])
            for targe in target_area:
                xmin, ymin, xmax, ymax = bounding_rect(targe[1])
                # Clamp the target's bounding box to the ROI.
                targe_y1_in_roi = max(0, ymin-roi_y1)
                targe_y2_in_roi = min(img_shape[0], ymax-roi_y1)
                targe_x1_in_roi = max(0, xmin-roi_x1)
                targe_x2_in_roi = min(img_shape[1], xmax-roi_x1)
                left_roi_targe = left_roi[targe_y1_in_roi:targe_y2_in_roi, targe_x1_in_roi:targe_x2_in_roi]
                left_src_roi_targe = left_src_roi[targe_y1_in_roi:targe_y2_in_roi, targe_x1_in_roi:targe_x2_in_roi]
                if self.bFromTop or self.processThing == 'Contour looking for polys':
                    if len(left_roi.shape) == 2:
                        polys = np.concatenate((polys, find_polys(left_roi_targe, self.bShowed, 0.2, hands_origin=(targe_x1_in_roi, targe_y1_in_roi))))
                    else:
                        # Contour search requires a single-channel image.
                        return 1
                elif self.processThing == 'Yolo and geometry':
                    out_boxes, out_scores, out_classes = self.yolo.detect_image(left_src_roi_targe)
                    polys = np.concatenate((polys, geometry_via_yolo(out_classes, out_boxes, left_roi_targe, (targe_x1_in_roi, targe_y1_in_roi))))
                elif self.processThing == 'Contour and geometry':
                    polys = np.concatenate((polys, geometry_via_contours(left_roi_targe, (targe_x1_in_roi, targe_y1_in_roi))))
        else:
            # Whole-ROI search (note the much smaller area threshold, 0.002).
            if self.bFromTop or self.processThing == 'Contour looking for polys':
                if len(left_roi.shape) == 2:
                    polys = find_polys(left_roi, self.bShowed, 0.002)
                else:
                    return 1
            elif self.processThing == 'Yolo and geometry':
                out_boxes, out_scores, out_classes = self.yolo.detect_image(left_src_roi)
                polys = geometry_via_yolo(out_classes, out_boxes, left_roi)
            elif self.processThing == 'Contour and geometry':
                polys = geometry_via_contours(left_roi)
        # --- Stage 2: disparity computation ----------------------------------
        # NOTE(review): if disparityStyle matches none of these branches,
        # `disparity` stays unbound and later code raises NameError.
        if self.disparityStyle == 'SGBMDisparity':
            bSGBM = True
            disparity = compute_disparity(left_src_roi, right_src_roi)
            if self.bShowed:
                cv2.imshow('disp', visualize_disparity(disparity))
                cv2.waitKey()
        elif self.disparityStyle == 'BoundingRect':
            # Custom contour-based matcher; also returns per-pixel matches.
            match_result, approxes_left, approxes_right, approxes_left_bbox, approxes_right_bbox, disparity, right_matched, matched_all_left_points = tellw_stereoMatch2(left_roi, right_roi, bPointMatched=True)
            if self.bVisualized:
                # Draw matched contour pairs in a shared random colour and
                # show a left|right montage with epipolar guide lines.
                left_roi_copy = left_roi.copy()
                right_roi_copy = right_roi.copy()
                left_roi_copy = cv2.cvtColor(left_roi_copy, cv2.COLOR_GRAY2RGB)
                right_roi_copy = cv2.cvtColor(right_roi_copy, cv2.COLOR_GRAY2RGB)
                for i, mr in enumerate(match_result):
                    if mr['match'] != -1:
                        color = random.sample(range(256), 3)
                        cv2.rectangle(left_roi_copy, (approxes_left_bbox[i][0], approxes_left_bbox[i][1]),
                                      (approxes_left_bbox[i][0] + approxes_left_bbox[i][2],
                                       approxes_left_bbox[i][1] + approxes_left_bbox[i][3]), color)
                        cv2.rectangle(right_roi_copy,
                                      (approxes_right_bbox[mr['match']][0], approxes_right_bbox[mr['match']][1]), (
                                      approxes_right_bbox[mr['match']][0] + approxes_right_bbox[mr['match']][2],
                                      approxes_right_bbox[mr['match']][1] + approxes_right_bbox[mr['match']][3]), color)
                        cv2.drawContours(left_roi_copy, [approxes_left[i]], -1, color)
                        cv2.drawContours(right_roi_copy, [approxes_right[mr['match']]], -1, color)
                hstack = np.hstack((left_roi_copy, right_roi_copy))
                # hstack =np.hstack((rec_left_roi_src, rec_right_roi_src))
                for i in range(20, img_shape[0], 40):
                    cv2.line(hstack, (0, i), (2*img_shape[1], i), (0, 0, 255))
                # Draw a random ~2% sample of the point correspondences.
                for point_y in range(img_shape[0]):
                    for point_x in range(img_shape[1]):
                        if disparity[point_y][point_x]>0:
                            if random.randint(0, 50)==0:
                                cv2.line(hstack, (point_x, point_y), (img_shape[1]+right_matched[point_y][point_x][0], right_matched[point_y][point_x][1]), (255, 255, 0))
                cv2.imshow('matched', hstack)
                cv2.waitKey(0)
        elif self.disparityStyle == 'SURF':
            # Feature-based matching; optionally returns a visualisation image.
            if self.bVisualized:
                disparity, right_matched, match_img, matched_all_left_points = surf_stereoMatch(left_roi, right_roi, True)
                cv2.imshow('matched', match_img)
                cv2.waitKey()
            else:
                disparity, right_matched, _, matched_all_left_points = surf_stereoMatch(left_roi, right_roi)
        elif self.disparityStyle == 'Cross-correlation':
            right_matched, approxes_left, approxes_right, disparity, matched_all_left_points = tellw_stereoMatch(left_roi, right_roi, left_src_roi, right_src_roi)
            if self.bVisualized:
                left_roi_copy = left_roi.copy()
                right_roi_copy = right_roi.copy()
                left_roi_copy = cv2.cvtColor(left_roi_copy, cv2.COLOR_GRAY2RGB)
                right_roi_copy = cv2.cvtColor(right_roi_copy, cv2.COLOR_GRAY2RGB)
                hstack = np.hstack((left_roi_copy, right_roi_copy))
                for i in range(20, img_shape[0], 40):
                    cv2.line(hstack, (0, i), (2 * img_shape[1], i), (0, 0, 255))
                for point_y in range(img_shape[0]):
                    for point_x in range(img_shape[1]):
                        if disparity[point_y][point_x] > 0:
                            if random.randint(0, 50) == 0:
                                cv2.line(hstack, (point_x, point_y), (
                                img_shape[1] + right_matched[point_y][point_x][0], right_matched[point_y][point_x][1]),
                                         (255, 255, 0))
                cv2.imshow('matched', hstack)
                cv2.waitKey(0)
        # --- Stage 3: 3-D reconstruction -------------------------------------
        # NOTE(review): `and` binds tighter than `or`, so this reads as
        # bFromTop or (processThing == ... and bSGBM); the intent may have been
        # (bFromTop or processThing == ...) and bSGBM — confirm.
        if self.bFromTop or self.processThing == 'Contour looking for polys' and bSGBM:
            bAllReporj = True
            # Pad the disparity back to full-frame coordinates, then reproject
            # every pixel through Q.
            disparity = np.pad(disparity, ((roi_y1, 0), (roi_x1, 0)), 'constant', constant_values=(1, 1))
            cloud = reproject(disparity, self.calibrator.q, self.bmm)
        else:
            # Sparse path: pick anchor correspondences, then re-estimate the
            # disparity only along the polygon contour points.
            seek_left_src_roi = left_src_roi / 255
            seek_right_src_roi = right_src_roi / 255
            left_marks = []
            right_marks = []
            mark_y_flag = [0 for i in range(img_shape[0])]  # 1 = row already used
            # Sample 10 matched points on 10 distinct rows.
            # NOTE(review): this loops forever if fewer than 10 rows have
            # matches, and random.sample(...) returns a *list*, so mark_x is a
            # list used as an index below — verify both against the matchers.
            for mark_i in range(10):
                while True:
                    mark_y = random.randint(0, img_shape[0] - 1)
                    if not mark_y_flag[mark_y] and len(matched_all_left_points[mark_y]) > 0:
                        mark_x = random.sample(matched_all_left_points[mark_y], 1)
                        left_marks.append([mark_x, mark_y])
                        right_marks.append(list(right_matched[mark_y][mark_x]))
                        mark_y_flag[mark_y] = 1
            left_marks = np.array(left_marks)
            right_marks = np.array(right_marks)
            # Refresh disparity at every valid polygon vertex/contour point.
            for poly in polys:
                if isinstance(poly[0], int):
                    # Tagged geometry: poly = (shape_code, point groups).
                    for po in poly[1]:
                        for p in po:
                            if p[0][0] >= 0 and p[0][1]>=0:
                                disparity[p[0][1]][p[0][0]] = cal_new_disparity((p[0][0], p[0][1]), left_marks, right_marks, seek_left_src_roi, seek_right_src_roi)
                else:
                    # Plain contour polygon (OpenCV point format: p[0] = (x, y)).
                    for po in poly:
                        if po[0][0]>=0 and po[0][1]>=0:
                            disparity[po[0][1]][po[0][0]] = cal_new_disparity((po[0][0], po[0][1]), left_marks, right_marks, seek_left_src_roi, seek_right_src_roi)
            cloud = custom_3d_reconstruction(self.calibrator.q, disparity, roi_x1, roi_y1)
        # Prepare a 3-channel image for drawing the results.
        if len(left_src.shape) == 2:
            resShow = np.stack((left_src_roi,)*3, axis=-1)
        else:
            resShow = left_src_roi.copy()
        # --- Stage 4: per-polygon volume calculation -------------------------
        # NOTE(review): cal_results is only assigned inside this branch; any
        # other sizeCalculation value raises NameError at the np.array() below.
        if self.sizeCalculation == 'Rect calculation':
            z_ratio=1
            if self.processThing == 'Contour looking for polys':
                # for poly in polys:
                #     pt0 = cloud[poly[0][0][1]][poly[0][0][0]]
                #     pt1 = cloud[poly[1][0][1]][poly[1][0][0]]
                #     pt2 = cloud[poly[2][0][1]][poly[2][0][0]]
                #     pt3 = cloud[poly[3][0][1]][poly[3][0][0]]
                #     if self.bShowed:
                #         print('(%d, %d)\'s world coordinate is (%d, %d, %d)'%(poly[0][0][0], poly[0][0][1], pt0[0], pt0[1], pt0[2]))
                #         print('(%d, %d)\'s world coordinate is (%d, %d, %d)'%(poly[1][0][0], poly[1][0][1], pt1[0], pt1[1], pt1[2]))
                #         print('(%d, %d)\'s world coordinate is (%d, %d, %d)'%(poly[2][0][0], poly[2][0][1], pt2[0], pt2[1], pt2[2]))
                #         print('(%d, %d)\'s world coordinate is (%d, %d, %d)'%(poly[3][0][0], poly[3][0][1], pt3[0], pt3[1], pt3[2]))
                #     a = (_distance(pt0, pt1)+_distance(pt2, pt3))/2
                #     b = (_distance(pt0, pt3)+_distance(pt1, pt2))/2
                #     if self.bmm:
                #         c = self.camera_height*10-min([pt0[2], pt1[2], pt2[2], pt3[2], self.camera_height*10])
                #     else:
                #         c = self.camera_height/100-min([pt0[2], pt1[2], pt2[2], pt3[2], self.camera_height/100])
                #     if not self.bmm:
                #         a*=1000
                #         b*=1000
                #         c*=1000
                #     astr = '%.3fcm'%(a/10)
                #     bstr = '%.3fcm'%(b/10)
                #     cstr = '%.3fcm'%(c/10)
                #     vstr = '%.3fdm^3'%(a*b*c/1000000)
                #     if self.bTimeRecorded:
                #         duration = time.time()-startTime
                #         print('costs %ds, fps: %f'%(int(duration), 1/duration))
                #     elif self.bShowed:
                #         print('a: %s; b: %s; c: %s; v: %s'%(astr, bstr, cstr, vstr))
                #     cv2.line(resShow, (poly[0][0][0]+int(self.picWidth*self.cut_rate), poly[0][0][1]+int(self.picHeight*self.cut_rate)), (poly[1][0][0]+int(self.picWidth*self.cut_rate), poly[1][0][1]+int(self.picHeight*self.cut_rate)), (0, 0, 255))
                #     cv2.putText(resShow, astr, (int((poly[0][0][0]+poly[1][0][0])/2+self.picWidth*self.cut_rate), int((poly[0][0][1]+poly[1][0][1])/2+self.picHeight*self.cut_rate)), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 0))
                #     cv2.line(resShow, (poly[2][0][0] + int(self.picWidth * self.cut_rate), poly[2][0][1] + int(self.picHeight * self.cut_rate)), (poly[1][0][0] + int(self.picWidth * self.cut_rate), poly[1][0][1] + int(self.picHeight * self.cut_rate)), (0, 0, 255))
                #     cv2.putText(resShow, bstr, (int((poly[2][0][0] + poly[1][0][0]) / 2 + self.picWidth * self.cut_rate), int((poly[2][0][1] + poly[1][0][1]) / 2 + self.picHeight * self.cut_rate)), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 0))
                #     cv2.putText(resShow, cstr, (int((poly[2][0][0] + poly[0][0][0]) / 2 + self.picWidth * self.cut_rate), int((poly[2][0][1] + poly[0][0][1]) / 2 + self.picHeight * self.cut_rate)), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0))
                #     VStrPosX = poly[1][0][0]+int(self.picWidth*self.cut_rate) if poly[1][0][0]+int(self.picWidth*self.cut_rate) < self.picWidth-40 else self.picWidth-40
                #     cv2.putText(resShow, vstr, (VStrPosX, poly[1][0][1]+int(self.picHeight*self.cut_rate)), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 255))
                #     rect = cv2.boundingRect(poly)
                #     cv2.rectangle(resShow, (rect[0]+int(self.picWidth*self.cut_rate), rect[1]+int(self.picHeight*self.cut_rate)), (rect[0]+rect[2]+int(self.picWidth*self.cut_rate), rect[1]+rect[3]+int(self.picHeight*self.cut_rate)), (255, 0, 0))
                if bAllReporj:
                    # Full-frame cloud: polygon coordinates must be offset by
                    # the ROI origin before indexing.
                    cal_results = []
                    for poly in polys:
                        if len(poly.shape) == 4:
                            poly = poly.reshape((poly.shape[0], 1, 2))
                        underside_S=S_for_poly(poly, cloud, reproj_origins=(roi_x1, roi_y1))
                        hs = []
                        # Height = camera height minus mean z of the polygon's
                        # vertices (negative heights are discarded).
                        for po in poly:
                            h=self.camera_height-cloud[roi_y1+po[0][1]][roi_x1+po[0][0]][2]*z_ratio
                            if h>0:
                                hs.append(h)
                        if len(hs) == 0:
                            end_h=0
                        else:
                            hs=np.array(hs)
                            end_h = np.mean(hs)
                        # Volume = base area * mean height (prism model).
                        result = underside_S*end_h
                        cal_results.append(result)
                else:
                    # ROI-local cloud: same computation without the offset.
                    cal_results = []
                    for poly in polys:
                        if len(poly.shape) == 4:
                            poly = poly.reshape((poly.shape[0], 1, 2))
                        underside_S = S_for_poly(poly, cloud)
                        hs = []
                        for po in poly:
                            h = self.camera_height - cloud[po[0][1]][po[0][0]][2]*z_ratio
                            if h > 0:
                                hs.append(h)
                        if len(hs) == 0:
                            end_h = 0
                        else:
                            hs = np.array(hs)
                            end_h = np.mean(hs)
                        result = underside_S * end_h
                        cal_results.append(result)
            else:
                # Geometry-tagged polygons: poly[0] is a shape code
                # (0 and 1 use circle/sphere formulas, anything else is a prism).
                if bAllReporj:
                    cal_results = []
                    for poly in polys:
                        if poly[0] == 0:
                            # Base area of a circle with radius (d1+d2)/4.
                            # NOTE(review): d2 measures the distance between the
                            # SAME point twice (index [2] on both sides) and is
                            # therefore always 0 — likely a typo for point [3].
                            d1 = _distance(cloud[roi_y1+poly[1][0][0][0][1]][roi_x1+poly[1][0][0][0][0]], cloud[roi_y1+poly[1][0][1][0][1]][roi_x1+poly[1][0][1][0][0]])
                            d2 = _distance(cloud[roi_y1+poly[1][0][2][0][1]][roi_x1+poly[1][0][2][0][0]], cloud[roi_y1+poly[1][0][2][0][1]][roi_x1+poly[1][0][2][0][0]])
                            underside_S = math.pi*((d1+d2)/4)**2
                            hs = []
                            for polidx, pol in enumerate(poly[1][1]):
                                if pol[0][0] >=0 and pol[0][1]>=0:
                                    hs.append(_distance(cloud[roi_y1+pol[0][1]][roi_x1+pol[0][0]], cloud[roi_y1+poly[1][0][polidx][0][1]][roi_x1+poly[1][0][polidx][0][0]]))
                            if len(hs) == 0:
                                end_h = 0
                            else:
                                hs = np.array(hs)
                                end_h = np.mean(hs)
                            result = underside_S * end_h
                            cal_results.append(result)
                        elif poly[0] == 1:
                            # Sphere volume 4/3*pi*r^3 with r = (d1+d2)/4.
                            # NOTE(review): same duplicated-point issue in d2.
                            d1 = _distance(cloud[roi_y1 + poly[1][0][0][0][1]][roi_x1 + poly[1][0][0][0][0]],
                                           cloud[roi_y1 + poly[1][0][1][0][1]][roi_x1 + poly[1][0][1][0][0]])
                            d2 = _distance(cloud[roi_y1 + poly[1][0][2][0][1]][roi_x1 + poly[1][0][2][0][0]],
                                           cloud[roi_y1 + poly[1][0][2][0][1]][roi_x1 + poly[1][0][2][0][0]])
                            result = 4*math.pi*((d1+d2)/4)**3/3
                            cal_results.append(result)
                        else:
                            # Generic prism: polygon base area * mean edge height.
                            underside_S = S_for_poly(poly[1][0], cloud, reproj_origins=(roi_x1, roi_y1))
                            hs = []
                            for polidx, pol in enumerate(poly[1][1]):
                                if pol[0][0] >= 0 and pol[0][1] >= 0:
                                    hs.append(_distance(cloud[roi_y1 + pol[0][1]][roi_x1 + pol[0][0]],
                                                        cloud[roi_y1 + poly[1][0][polidx][0][1]][
                                                            roi_x1 + poly[1][0][polidx][0][0]]))
                            if len(hs) == 0:
                                end_h = 0
                            else:
                                hs = np.array(hs)
                                end_h = np.mean(hs)
                            result = underside_S * end_h
                            cal_results.append(result)
                else:
                    # Same three shape codes, indexed into the ROI-local cloud.
                    cal_results = []
                    for poly in polys:
                        if poly[0] == 0:
                            # NOTE(review): d2 duplicated-point issue, as above.
                            d1 = _distance(cloud[poly[1][0][0][0][1]][poly[1][0][0][0][0]],
                                           cloud[poly[1][0][1][0][1]][poly[1][0][1][0][0]])
                            d2 = _distance(cloud[poly[1][0][2][0][1]][poly[1][0][2][0][0]],
                                           cloud[poly[1][0][2][0][1]][poly[1][0][2][0][0]])
                            underside_S = math.pi * ((d1 + d2) / 4) ** 2
                            hs = []
                            for polidx, pol in enumerate(poly[1][1]):
                                if pol[0][0] >= 0 and pol[0][1] >= 0:
                                    hs.append(_distance(cloud[pol[0][1]][pol[0][0]],
                                                        cloud[poly[1][0][polidx][0][1]][
                                                            poly[1][0][polidx][0][0]]))
                            if len(hs) == 0:
                                end_h = 0
                            else:
                                hs = np.array(hs)
                                end_h = np.mean(hs)
                            result = underside_S * end_h
                            cal_results.append(result)
                        elif poly[0] == 1:
                            d1 = _distance(cloud[poly[1][0][0][0][1]][poly[1][0][0][0][0]],
                                           cloud[poly[1][0][1][0][1]][poly[1][0][1][0][0]])
                            d2 = _distance(cloud[poly[1][0][2][0][1]][poly[1][0][2][0][0]],
                                           cloud[poly[1][0][2][0][1]][poly[1][0][2][0][0]])
                            result = 4 * math.pi * ((d1 + d2) / 4) ** 3 / 3
                            cal_results.append(result)
                        else:
                            underside_S = S_for_poly(poly[1][0], cloud)
                            hs = []
                            for polidx, pol in enumerate(poly[1][1]):
                                if pol[0][0] >= 0 and pol[0][1] >= 0:
                                    hs.append(_distance(cloud[pol[0][1]][pol[0][0]],
                                                        cloud[poly[1][0][polidx][0][1]][
                                                            poly[1][0][polidx][0][0]]))
                            if len(hs) == 0:
                                end_h = 0
                            else:
                                hs = np.array(hs)
                                end_h = np.mean(hs)
                            result = underside_S * end_h
                            cal_results.append(result)
        # Rescale volumes for display (factor 1000; units depend on self.bmm).
        cal_results = np.array(cal_results)
        cal_results = cal_results/1000
        # --- Stage 5: draw each polygon's bounding box and its volume --------
        for pidx, poly in enumerate(polys):
            if isinstance(poly[0], int):
                # Tagged geometry: merge all point groups before boxing.
                all_points = np.array([])
                for p1 in poly[1]:
                    all_points = np.concatenate((all_points, p1))
                #     print(p1)
                # print(all_points)
                # input()
                rect_po = poly_rect(all_points, True)
                cv2.rectangle(resShow, (rect_po[0], rect_po[1]), (rect_po[2], rect_po[3]), (255,255,0))
                cv2.putText(resShow, '%.2f'%cal_results[pidx], (int((rect_po[0] + rect_po[2]) / 2),
                                            int((rect_po[1] + rect_po[3]) / 2)),
                            cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 255))
            else:
                rect_po = poly_rect(poly)
                cv2.rectangle(resShow, (rect_po[0], rect_po[1]), (rect_po[2], rect_po[3]), (255, 255, 0))
                cv2.putText(resShow, '%.2f' % cal_results[pidx], (int((rect_po[0] + rect_po[2]) / 2),
                                                                  int((rect_po[1] + rect_po[3]) / 2)),
                                                                  cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 255))
        cv2.imshow('measure', resShow)
        if self.bCollected:
            cv2.imwrite('detectresult/%d.jpg'%self.detectCount, resShow)
        if self.bAuto:
            cv2.waitKey(40)
        else:
            cv2.waitKey()
        # Persist the incremented frame counter back to the config file.
        # NOTE(review): the file handle passed to cfg.write is never closed
        # explicitly — consider a `with open(...)` block.
        self.detectCount+=1
        self.cfg.set('binocular_cameras', 'detectCount', str(self.detectCount))
        self.cfg.write(open('SWQT.ini', 'w'))
        cv2.destroyAllWindows()
        return 0