# Modified by Fu Shuai for test
from winsound import PlaySound
import cv2
import time
import numpy as np
import stereo_config_200
import os
import math
import pcl
import pcl.pcl_visualization

##################### Detection #####################
weightsPath='D:/class/yolov3_last.weights'  # trained YOLOv3 weights
configPath='D:/class/yolov3.cfg'  # Darknet network configuration
labelsPath = 'D:/class/obj.names'  # class label names, one per line
# Load the label list and initialize the detection accumulators.
LABELS = open(labelsPath).read().strip().split("\n")
boxes = []        # detected boxes as [x, y, w, h]
confidences = []  # per-box confidence scores
classIDs = []     # per-box class indices
class_result = []  # detected boxes [x1, y1, x2, y2]; the localization stage uses them to blank everything outside
# Build the detection network from the Darknet config + weights.
net = cv2.dnn.readNetFromDarknet(configPath,weightsPath)

# Environment analysis: derive the two Canny hysteresis thresholds from the image
def Otsu2Threshold(src):
    """Compute a (low, high) threshold pair by double-threshold Otsu.

    Exhaustively evaluates every pair (i, j), i <= j, of gray levels and
    keeps the pair maximizing the three-class between-class variance
    (object < i, middle in (i, j), background > j; levels i and j
    themselves are excluded from the class sums, matching the original
    accumulation bounds).

    Parameters
    ----------
    src : ndarray
        Single-channel image with non-negative integer values 0-255.

    Returns
    -------
    (int, int)
        (Threshold1, Threshold2), usable as Canny hysteresis thresholds.

    Notes
    -----
    Rewritten from an O(256^3) triple loop to O(pixels + 256^2) using a
    histogram from np.bincount plus prefix sums; the selected pair is
    identical (same integer sums, same float divisions, same tie-breaking
    scan order), but large images no longer overflow the old int32
    accumulators.
    """
    theta = 0.000001  # guards divisions when a class is empty
    # 256-bin histogram in one C-level pass (int64: overflow-safe).
    hest = np.bincount(src.ravel(), minlength=256).astype(np.int64)
    n_all = int(src.size)

    # Prefix sums so each candidate pair (i, j) is evaluated in O(1):
    #   cum_n[k] = count of pixels with value <= k
    #   cum_g[k] = sum of value*count for values <= k
    levels = np.arange(256, dtype=np.int64)
    cum_n = np.cumsum(hest)
    cum_g = np.cumsum(levels * hest)
    total_g = int(cum_g[255])

    best_g = -1.0  # best between-class variance so far
    Threshold1 = 0
    Threshold2 = 0
    for i in range(256):
        n_object = int(cum_n[i])  # pixels with value <= i
        # gray mass strictly below level i (level i excluded)
        g_object = int(cum_g[i]) - i * int(hest[i])
        for j in range(i, 256):
            # pixels with value >= j (level j included, as in the original)
            n_background = n_all - int(cum_n[j]) + int(hest[j])
            n_middle = n_all - n_object - n_background
            w0 = n_object / n_all
            w2 = n_background / n_all
            w1 = 1 - w0 - w2
            if j > i:
                # gray mass of levels i+1 .. j-1
                g_middle = int(cum_g[j]) - j * int(hest[j]) - int(cum_g[i])
            else:
                g_middle = 0  # empty middle class when j == i
            # gray mass of levels j+1 .. 255
            g_background = total_g - int(cum_g[j])
            u0 = g_object / (n_object + theta)
            u1 = g_middle / (n_middle + theta)
            u2 = g_background / (n_background + theta)
            u = w0 * u0 + w1 * u1 + w2 * u2
            g = w0 * (u - u0) ** 2 + w1 * (u - u1) ** 2 + w2 * (u - u2) ** 2
            if best_g < g:
                best_g = g
                Threshold1 = i
                Threshold2 = j
    return Threshold1, Threshold2
##################### Localization #####################
# Preprocessing for stereo matching
def preprocess(img1, img2):
    """Convert a BGR stereo pair to grayscale and equalize histograms.

    Equalization boosts contrast so the block matcher has more texture
    to lock onto. Returns the processed (left, right) pair.
    """
    gray_left = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    gray_right = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
    return cv2.equalizeHist(gray_left), cv2.equalizeHist(gray_right)
# Undistortion / stereo rectification setup.
# Computes the remap tables and the reprojection matrix from the stereo
# calibration held in `config` (a stereo_config.stereoCamera() instance).
def getRectifyTransform(height, width, config):
    """Return (map1x, map1y, map2x, map2y, Q) for stereo rectification.

    Side effect: publishes the left-camera intrinsics and rectification
    matrices as globals (left_K, left_distortion, R1, P1) because
    rectifyboxes() needs them to remap detection-box corners.
    """
    global left_K,left_distortion,R1,P1

    # Calibration parameters
    left_K = config.cam_matrix_left
    right_K = config.cam_matrix_right
    left_distortion = config.distortion_l
    right_distortion = config.distortion_r

    # R1/R2 rotate each camera frame into the rectified frame; P1/P2 are
    # the new projection matrices (rectified camera -> image plane).
    R1, R2, P1, P2, Q, _roi1, _roi2 = cv2.stereoRectify(
        left_K, left_distortion, right_K, right_distortion,
        (width, height), config.R, config.T, alpha=0)
    map1x, map1y = cv2.initUndistortRectifyMap(
        left_K, left_distortion, R1, P1, (width, height), cv2.CV_32FC1)
    map2x, map2y = cv2.initUndistortRectifyMap(
        right_K, right_distortion, R2, P2, (width, height), cv2.CV_32FC1)
    return map1x, map1y, map2x, map2y, Q
# Apply undistortion + stereo rectification to one image pair
def rectifyImage(image1, image2, map1x, map1y, map2x, map2y):
    """Remap both images using the tables from getRectifyTransform().

    FIX: cv2.remap does not support INTER_AREA (the OpenCV docs list it
    as unsupported for remap), so INTER_LINEAR is used explicitly instead
    of relying on undefined fallback behaviour.
    """
    rectifyed_img1 = cv2.remap(image1, map1x, map1y, cv2.INTER_LINEAR)
    rectifyed_img2 = cv2.remap(image2, map2x, map2y, cv2.INTER_LINEAR)
    return rectifyed_img1, rectifyed_img2
# Map detection-box corners into the rectified image frame
def rectifyboxes(input_x1, input_y1, input_x2, input_y2):
    """Undistort/rectify the two box corners (top-left, bottom-right).

    Relies on the globals (left_K, left_distortion, R1, P1) published by
    getRectifyTransform(). Returns the 2x1x2 array of rectified points
    produced by cv2.undistortPoints.
    """
    corners = np.array([[[input_x1, input_y1]],
                        [[input_x2, input_y2]]], dtype=np.float64)
    return cv2.undistortPoints(corners, left_K, left_distortion, R=R1, P=P1)
# Sample five points inside the box so that a disparity hole at the centre
# does not defeat the ranging (averaging also reduces the error).
def five_points(box_rectify):
    """Return five integer pixel positions inside a rectified box.

    `box_rectify` is the 2x1x2 array from rectifyboxes() holding the
    top-left and bottom-right corners. Order of the returned [x, y]
    pairs: up, down, right, left, centre.
    """
    x1 = box_rectify[0, 0, 0]
    y1 = box_rectify[0, 0, 1]
    x2 = box_rectify[1, 0, 0]
    y2 = box_rectify[1, 0, 1]
    w = x2 - x1  # box width
    h = y2 - y1  # box height
    cx = int(x1 + w /2)  # column shared by up/down/centre
    cy = int(y1 + h /2)  # row shared by left/right/centre
    return [
        [cx, int(y1 + h /4)],        # up
        [cx, int(y1 + 3*h /4)],      # down
        [int(x1 + 3*w /4), cy],      # right
        [int(x1 + w /4), cy],        # left
        [cx, cy],                    # centre
    ]
    
    
# # 通过画线检验立体校正结果
# def draw_line(image1, image2):
#     # 建立输出图像
#     height = max(image1.shape[0], image2.shape[0])
#     width = image1.shape[1] + image2.shape[1]
#     output = np.zeros((height, width, 3), dtype=np.uint8)
#     output[0:image1.shape[0], 0:image1.shape[1]] = image1
#     output[0:image2.shape[0], image1.shape[1]:] = image2 
#     for k in range(15):
#         cv2.line(output, (0, 50 * (k + 1)), (2 * width, 50 * (k + 1)), (0, 255, 0), thickness=2, lineType=cv2.LINE_AA)  # 直线间隔：100
#     return output
# SGBM disparity computation
def disparity_SGBM(left_image, right_image, down_scale=False):
    """Compute left/right disparity maps with semi-global block matching.

    Parameters
    ----------
    left_image, right_image : same-size grayscale or BGR images.
    down_scale : bool
        When True, match on half-resolution images and upsample the
        result (disparities rescaled accordingly) -- faster but coarser.

    Returns
    -------
    (disparity_left, disparity_right)
        Raw SGBM output, i.e. 16x the true disparity (callers divide by
        16 before metric use). int16 in the full-resolution path, float
        in the down-scaled path.
    """
    img_channels = 1 if left_image.ndim == 2 else 3
    blockSize = 3
    param = {'minDisparity': 0,  # minimum disparity, default 0
             'numDisparities': 128,  # search range, must be a multiple of 16
             'blockSize': blockSize,  # matching block size: odd, 3-11
             'P1': 8 * img_channels * blockSize ** 2,  # smoothness penalty, small jumps
             'P2': 32 * img_channels * blockSize ** 2,  # smoothness penalty, large jumps
             'disp12MaxDiff': 1,  # max allowed left-right consistency error
             'preFilterCap': 63,  # prefilter clip value, default 15
             'uniquenessRatio': 15,  # reject ambiguous matches
             'speckleWindowSize': 100,  # blobs smaller than this are noise
             'speckleRange': 1,  # max disparity variation within a blob
             'mode': cv2.STEREO_SGBM_MODE_SGBM_3WAY
             }
    sgbm = cv2.StereoSGBM_create(**param)

    size = (left_image.shape[1], left_image.shape[0])
    if not down_scale:
        disparity_left = sgbm.compute(left_image, right_image)
        disparity_right = sgbm.compute(right_image, left_image)
    else:
        left_image_down = cv2.pyrDown(left_image)
        right_image_down = cv2.pyrDown(right_image)
        factor = size[0] / left_image_down.shape[1]
        disparity_left_half = sgbm.compute(left_image_down, right_image_down)
        disparity_right_half = sgbm.compute(right_image_down, left_image_down)
        disparity_left = cv2.resize(disparity_left_half, size, interpolation=cv2.INTER_AREA)
        disparity_right = cv2.resize(disparity_right_half, size, interpolation=cv2.INTER_AREA)
        # FIX: compute() yields int16; an in-place `*=` with a float factor
        # raises a numpy casting error, so rescale out of place instead.
        disparity_left = disparity_left * factor
        disparity_right = disparity_right * factor
    return disparity_left, disparity_right
# Load the stereo rig's intrinsic/extrinsic calibration parameters
config = stereo_config_200.stereoCamera()
##################### Point cloud #####################
# Flatten an h x w x 3 array into an N x 3 array (N = h * w)
def hw3ToN3(points):
    """Reshape per-pixel 3-vectors into a flat (h*w, 3) array.

    Each of the first three channels is flattened in row-major order and
    the three resulting columns are stacked side by side.
    """
    rows, cols = points.shape[0:2]
    channels = [points[:, :, c].reshape(rows * cols, 1) for c in range(3)]
    return np.hstack(channels)
 
 
# Convert per-pixel 3D coordinates + colours into a packed XYZRGB cloud
def DepthColor2Cloud(points_3d, colors):
    """Build an (N, 4) float32 point cloud [X, Y, Z, packed_rgb].

    The BGR colour triple is packed into a single value the way PCL
    expects (blue | green << 8 | red << 16). Points outside a fixed
    working volume (0 < Z <= 1500, |X| <= 1000, |Y| <= 1000, same units
    as points_3d) are dropped.

    FIX: the previous version truncated coordinates through int16, which
    both discarded sub-unit precision and wrapped values beyond +-32767
    back into range, letting far-away points (e.g. Z ~ 66000 -> 464) slip
    through the distance filter. The output array is float32 regardless,
    so the filter now runs on the real coordinates.
    """
    rows, cols = points_3d.shape[0:2]
    size = rows * cols

    points_ = hw3ToN3(points_3d)               # real (float) coordinates
    colors_ = hw3ToN3(colors).astype(np.int64)

    # Colour channels (OpenCV images are BGR)
    blue = colors_[:, 0].reshape(size, 1)
    green = colors_[:, 1].reshape(size, 1)
    red = colors_[:, 2].reshape(size, 1)

    # PCL packed-colour convention: B | G<<8 | R<<16
    rgb = blue + np.left_shift(green, 8) + np.left_shift(red, 16)

    # Coordinates + packed colour, one row per point
    pointcloud = np.hstack((points_, rgb)).astype(np.float32)

    # Keep only points inside the working volume
    X = pointcloud[:, 0]
    Y = pointcloud[:, 1]
    Z = pointcloud[:, 2]
    keep = ((Z > 0) & (Z <= 1500) &
            (np.abs(X) <= 1000) & (np.abs(Y) <= 1000))
    return pointcloud[keep]
 
 
# Display the point cloud in a PCL viewer window (blocks until closed)
def view_cloud(pointcloud):
    """Show an (N, 4) XYZRGBA float32 cloud; returns when the viewer closes.

    Visualization is best-effort: viewer failures are reported and
    swallowed so a headless/driver problem cannot kill the capture loop.
    FIX: the previous bare `except: pass` also silently ate
    KeyboardInterrupt/SystemExit and hid every error; now only Exception
    subclasses are caught, and the failure is printed.
    """
    cloud = pcl.PointCloud_PointXYZRGBA()
    cloud.from_array(pointcloud)

    try:
        visual = pcl.pcl_visualization.CloudViewing()
        visual.ShowColorACloud(cloud)
        # Spin until the user closes the viewer window.
        while not visual.WasStopped():
            pass
    except Exception as exc:
        print("[Warning] point-cloud viewer failed:", exc)

##################### Capture #####################
# Open the two cameras of the stereo rig.
# (VideoCapture(0) would be the system default device.)
left_camera = cv2.VideoCapture(1,cv2.CAP_DSHOW)  # left camera (DirectShow backend, Windows)
left_camera.set(cv2.CAP_PROP_FRAME_WIDTH,640)
left_camera.set(cv2.CAP_PROP_FRAME_HEIGHT,360)
right_camera = cv2.VideoCapture(2,cv2.CAP_DSHOW)  # right camera
right_camera.set(cv2.CAP_PROP_FRAME_WIDTH,640)
right_camera.set(cv2.CAP_PROP_FRAME_HEIGHT,360)
# Capture mode: True = shoot automatically every INTERVAL seconds,
# False = shoot manually with the 's' key.
AUTO =False  
# Interval between automatic shots, in seconds.
INTERVAL = 0.0005 
# utc is the start-of-capture timestamp; compared against `now` in the
# main loop to pace automatic shots.
utc = time.time()
# Directory captured frames are written to.
# NOTE(review): consider a relative path / os.path.join instead.
folder = "D:/python_camera/" 
# Preview windows for the two camera streams.
cv2.namedWindow("left")  
cv2.namedWindow("right")
cv2.moveWindow("left", 0, 0)   
# Save one captured frame; `pos` names the camera ("left" / "right")
def shot(pos, frame):
    """Write `frame` to <folder><pos>.jpg."""
    cv2.imwrite(folder + pos + ".jpg", frame)

while True:
    ##################### Capture #####################
    ret_l, left_frame = left_camera.read()
    ret_r, right_frame = right_camera.read()
    cv2.imshow("left", left_frame)
    cv2.imshow("right", right_frame)
    now = time.time()
    if AUTO and now - utc >= INTERVAL:
        shot("left", left_frame)
        shot("right", right_frame)
        utc = now
    key = cv2.waitKey(1)  # wait up to 1 ms for a key press, then loop
    if key == ord("q"):  # 'q' quits; 's' captures a pair and processes it
        break
    elif key == ord("s"):
        print("\r\nshoting!")
        time_start = time.time()  # start timing the detection + localization pass
        shot("left", left_frame)
        shot("right", right_frame)
        ##################### Detect + locate on the captured pair #####################
        # FIX: reset the module-level accumulators on every shot; previously
        # they kept growing across repeated presses of 's', so stale boxes
        # from earlier frames were re-reported and re-ranged.
        boxes = []
        confidences = []
        classIDs = []
        class_result = []
        # Read back the left image for detection
        image = cv2.imread(folder + "left.jpg")
        # Optional environment edge/line extraction (disabled):
        # src_line = cv2.imread(input)
        # src = cv2.imread(input,flags = 0)
        # dst1, dst2 = Otsu2Threshold(src)
        # edges = cv2.Canny(src, dst1, dst2, apertureSize= 3, L2gradient= True)
        # # Hough transform
        # lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 25, minLineLength=95, maxLineGap=6)
        # for i in range(len(lines)):
        #     x1,y1,x2,y2 = lines[i][0]
        #     cv2.line(src_line, (x1, y1), (x2, y2), (0, 255, 0), 1)
        # cv2.imshow('edges', edges)
        # cv2.imshow('lines', src_line)
        # cv2.waitKey(0)
        # cv2.imwrite(folder + "environment_class.jpg", src_line)
        # Image height and width
        (H, W) = image.shape[0:2]
        # Resolve the YOLO output layer names.
        ln = net.getLayerNames()
        out = net.getUnconnectedOutLayers()
        # FIX: depending on the OpenCV version getUnconnectedOutLayers()
        # returns [[200], [267], [400]] or [200, 267, 400]; flatten so both
        # work (indices are 1-based). Result e.g. ['yolo_82', 'yolo_94', 'yolo_106'].
        ln = [ln[int(idx) - 1] for idx in np.array(out).flatten()]
        # Build a blob from the frame: normalized and resized to match the
        # training input of the model.
        blob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416), swapRB=True, crop=False)
        net.setInput(blob)
        layerOutputs = net.forward(ln)  # forward pass -> raw detections
        for output in layerOutputs:  # three output scales
            for detection in output:  # each candidate box
                scores = detection[5:]  # detection = [x, y, w, h, obj, class scores...]
                classID = np.argmax(scores)  # index of the best class
                confidence = scores[classID]
                if confidence > 0.5:  # drop low-confidence candidates
                    box = detection[0:4] * np.array([W, H, W, H])
                    (centerX, centerY, width, height) = box.astype("int")
                    # top-left corner of the box
                    x = int(centerX - (width / 2))
                    y = int(centerY - (height / 2))
                    boxes.append([x, y, int(width), int(height)])
                    confidences.append(float(confidence))
                    classIDs.append(classID)
        # Non-maximum suppression over the collected boxes
        idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.2, 0.3)
        if len(idxs) > 0:
            box_seq = idxs.flatten()
            for seq in box_seq:
                (x, y) = (boxes[seq][0], boxes[seq][1])  # top-left corner
                (w, h) = (boxes[seq][2], boxes[seq][3])  # width / height
                class_result.append([x, y, x + w, y + h])
                # Box colour by class
                if classIDs[seq] == 0 or classIDs[seq] == 3:
                    color = [0, 0, 255]  # rivet / rivet-long: red
                elif classIDs[seq] == 1:
                    color = [0, 255, 0]  # nut: green
                elif classIDs[seq] == 2:
                    color = [255, 0, 0]  # aluminum chips: blue
                else:  # classID 4
                    color = [255, 0, 255]  # screw: purple
                cv2.rectangle(image, (x, y), (x + w, y + h), color, 2)  # draw the box
                text = "{}: {:.4f}".format(LABELS[classIDs[seq]], confidences[seq])
                cv2.putText(image, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.3, color, 1)  # label it
        cv2.imwrite(folder + "class.jpg", image)
        # Re-read the stereo pair for localization
        iml = cv2.imread(folder + "left.jpg")   # left image
        imr = cv2.imread(folder + "right.jpg")  # right image
        height, width = iml.shape[0:2]

        # Blank-out step (currently a no-op: the filled rectangle zeroes the
        # whole mask, so no pixel is blanked -- kept as a hook for masking
        # everything outside the detection boxes).
        # FIX: np.bool was removed in numpy 1.24 -> use the builtin bool.
        # FIX: size the mask from the actual frame (was hard-coded 480x640
        # while the cameras deliver 360x640, raising an IndexError on the
        # boolean indexing below).
        input_mask = np.uint8(255 * np.ones((height, width)))
        cv2.rectangle(input_mask, (0, 0), (width, height), (0, 0, 0), cv2.FILLED)
        output = iml.copy()
        output[input_mask.astype(bool), :] = 0
        iml = output
        # Same for the right image
        input_mask = np.uint8(255 * np.ones((height, width)))
        cv2.rectangle(input_mask, (0, 0), (width, height), (0, 0, 0), cv2.FILLED)
        output = imr.copy()
        output[input_mask.astype(bool), :] = 0
        imr = output

        # Stereo rectification: remap tables + reprojection matrix Q
        map1x, map1y, map2x, map2y, Q = getRectifyTransform(height, width, config)
        iml_rectified, imr_rectified = rectifyImage(iml, imr, map1x, map1y, map2x, map2y)
        cv2.imwrite(folder + "rectified_left.jpg", iml_rectified)
        cv2.imwrite(folder + "rectified_right.jpg", imr_rectified)
        # Draw equally spaced horizontal lines to eyeball the rectification:
        # line = draw_line(iml_rectified, imr_rectified)
        # cv2.imwrite(folder + "check.jpg", line)
        # Stereo matching
        iml_, imr_ = preprocess(iml, imr)  # grayscale + histogram equalization
        disp, _ = disparity_SGBM(iml_, imr_)
        disp = np.divide(disp.astype(np.float32), 16.)  # SGBM output is 16x the true disparity
        cv2.imwrite(folder + "disparity.jpg", disp)  # save the disparity map
        # Per-pixel 3D coordinates in the left camera frame
        points_3d = cv2.reprojectImageTo3D(disp, Q)
        # Range each detected object from five sample points inside its box
        print("识别出[", len(class_result), "]个多余物")
        dis_list = []
        for i in range(len(class_result)):
            print("多余物", i+1, ", 框左上右下像素坐标: ", class_result[i])
            k = 0  # number of sample points with a usable depth
            avoid_zero = 0.000001  # guards the averaging division below
            dis_list.append(0)
            box_rectify = rectifyboxes(class_result[i][0], class_result[i][1], class_result[i][2], class_result[i][3])
            box_points = five_points(box_rectify)
            for j in range(5):
                try:
                    wanted_point = points_3d[box_points[j][1]][box_points[j][0]]
                except IndexError:
                    print("[Warning] 五点定位算得像素坐标越界，跳过")
                    continue
                print("框内五点定位:", j+1, " xyz: ", wanted_point)
                # Accept only plausible depths in (0, 300)
                if wanted_point[2] > 0 and wanted_point[2] < 300:
                    k += 1
                    dis_list[i] += math.sqrt(wanted_point[0] ** 2 + wanted_point[1] ** 2 + wanted_point[2] ** 2)
            if dis_list[i] > 0:
                dis_list[i] /= (k + avoid_zero)
                print("avg distance: ", dis_list[i])  # in pixel units; multiply by the pixel pitch (1.4 um at 1920x1080) for metric distance
            else:
                print('disp failed or avg distance too far')

        time_end = time.time()  # detection + localization finished (duration currently not printed)
        # Build the Point_XYZRGBA cloud
        pointcloud = DepthColor2Cloud(points_3d, iml)
        # pcl.save(pointcloud, folder + "pointcloud_.pcd")

        # Show the point cloud
        view_cloud(pointcloud)
        cv2.waitKey(0)
        
# Shutdown: release camera handles and close the preview windows.
left_camera.release()  # release the capture devices
right_camera.release()
# cv2.destroyWindow("left")
# cv2.destroyWindow("right")
cv2.destroyAllWindows()