#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 2020/7/18 22:08
# @Author : LiShan
# @File : debug.py
# @Software: PyCharm
# @description：后方双目摄像头测试
# 视频保存路径：path = './record/' + now + '/' + 'back.avi'

import os
# 屏蔽使用pygame时的提示文字
os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = '1'
import time
from multiprocessing import Process, Queue
import cv2
import cv2 as cv
import numpy as np
import pygame
import camera_configs_back


# Timestamp used to name this recording session's output folder.
now = time.strftime("%Y-%m-%d-%H_%M", time.localtime(time.time()))
# Directory holding the saved video: ./record/<timestamp>/
path = './record/' + now + '/'
# exist_ok=True replaces the original exists()/makedirs() branching.
os.makedirs(path, exist_ok=True)

# Video recorder for the annotated rear-view stream.
fourcc = cv.VideoWriter_fourcc(*'XVID')  # codec used to encode the saved video
saveName = path + 'back.avi'
out = cv.VideoWriter(
    saveName,      # output file name
    fourcc,        # video codec
    20.0,          # frame rate
    (1920, 1080)   # frame resolution
)


# Container for one stereo image pair, passed between processes via a Queue
class CP:
    """Carrier object for a rectified stereo pair.

    Attributes (set by the producer, read_camera_front):
        imgL: rectified grayscale left image
        imgR: rectified grayscale right image
    """
    pass


# Spoken distance steps, largest first.  Each value is both the lower
# threshold for selecting it and the exact token used to build the mp3
# file name via repr(), so the int/float types must stay exactly as
# listed (repr(1) -> '1m.mp3' but repr(4.5) -> '4.5m.mp3').
_DISTANCE_STEPS = (
    10, 9, 8, 7, 6, 5, 4.5, 4,
    3.8, 3.6, 3.4, 3.2, 3,
    2.8, 2.6, 2.4, 2.2, 2,
    1.9, 1.8, 1.7, 1.6, 1.5, 1.4, 1.3, 1.2, 1.1, 1,
    0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0,
)


def _quantize_distance(a):
    """Snap a raw distance in meters onto the announcement grid.

    Returns -1 when there is nothing to announce (a > 10 m or a < 0),
    otherwise the largest step not exceeding ``a``.  Replaces the
    original 40-branch elif ladder with a single table scan.
    """
    if a > 10 or a < 0:
        return -1
    for step in _DISTANCE_STEPS:
        if a >= step:
            return step
    return -1  # unreachable for 0 <= a <= 10; kept as a safe default


# Distance voice broadcast @WZ
def broadcast(bc_recv, people_recv):
    """Announce obstacle distances (and pedestrians) through pygame audio.

    Args:
        bc_recv: queue of median distances in meters (fed by showImg_front).
        people_recv: queue of booleans flagging a detected pedestrian.

    On every 3rd slot of each 4-slot cycle (i == 2), a set pedestrian
    flag plays 'people.mp3' instead of the distance clip.  Runs forever.
    """
    a = -1
    while True:
        for i in range(4):
            people_flag = False
            # NOTE(review): Queue.get() without a timeout blocks here until
            # data arrives; the except only guards abnormal queue states.
            try:
                a = bc_recv.get()
            except Exception:
                pass
            try:
                people_flag = people_recv.get()
            except Exception:
                pass
            a = _quantize_distance(a)
            if a > 0:
                if i == 2 and people_flag:
                    mp3 = './music/' + 'people' + '.mp3'
                else:
                    mp3 = './music/' + repr(a) + 'm.mp3'
                pygame.mixer.init()
                pygame.mixer.music.load(mp3)  # load the audio clip
                pygame.mixer.music.play()     # start playback
                time.sleep(1)                 # give the clip time to play
                pygame.mixer.music.stop()     # stop playback
            else:
                # Nothing to announce in this slot (both original branches
                # were no-ops: `pass` and `continue` at the end of the body).
                continue


# Pedestrian detection - discard nested boxes @NXT
def is_inside(o, i):
    """Return True when rectangle ``o`` lies strictly inside rectangle ``i``.

    Both arguments are (x, y, w, h) boxes.  A detection box completely
    contained in another one is redundant and should be dropped by the
    caller.
    """
    ox, oy, ow, oh = o
    ix, iy, iw, ih = i
    left_inside = ix < ox
    top_inside = iy < oy
    right_inside = ox + ow < ix + iw
    bottom_inside = oy + oh < iy + ih
    return left_inside and top_inside and right_inside and bottom_inside


# Pedestrian detection - draw a rectangle around a target @NXT
def draw_person(image, person):
    """Draw a magenta rectangle (line width 3) around ``person``.

    ``person`` is an (x, y, w, h) box in pixel coordinates; ``image`` is
    modified in place.
    """
    x, y, w, h = person
    top_left = (x, y)
    bottom_right = (x + w, y + h)
    magenta = (255, 0, 255)  # BGR
    cv2.rectangle(image, top_left, bottom_right, magenta, 3)


# Front target (pedestrian) detection @BJJ
def ObjectDetection_front(camera_full, object_data):
    """Detect pedestrians in frames from ``camera_full``; publish boxes.

    Args:
        camera_full: queue of full BGR camera frames.
        object_data: queue receiving the filtered list of (x, y, w, h)
            detection boxes (stale entries are dropped first so only the
            newest result is kept).

    Runs forever.
    """
    # Build the HOG descriptor with OpenCV's default SVM people detector
    # ONCE, outside the frame loop (the original rebuilt it every frame).
    hog = cv2.HOGDescriptor()
    hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
    while True:
        try:
            img = camera_full.get(1)
        except Exception:
            continue
        found, _weights = hog.detectMultiScale(img)

        # Keep only boxes that are not fully contained in another box
        # (same semantics as the original counter-based double loop).
        found_filtered = [
            r for ri, r in enumerate(found)
            if not any(ri != qi and is_inside(r, q)
                       for qi, q in enumerate(found))
        ]
        # Newest-wins: drop any stale result before publishing.
        try:
            object_data.get_nowait()
        except Exception:
            pass
        object_data.put(found_filtered)


# Forward distance computation
def BM_front(camera_bm, data_bm):
    """Compute obstacle distances from rectified stereo pairs, forever.

    Pulls CP objects (imgL/imgR) from ``camera_bm``, runs OpenCV StereoBM,
    reprojects disparity to 3D, samples depth on a coarse 22x42 grid,
    median-filters each cell over the last 3 frames, and publishes a
    42-element distance row (centimeters; 1500 marks invalid) to
    ``data_bm``.  Several debug windows show the intermediate stages.
    """
    # Rolling per-cell sample buffer over 3 frames; newest frame is column 0.
    cut_data_array = np.zeros((924, 3), dtype=np.int16)
    cv2.namedWindow("cut img", 0)
    cv2.resizeWindow("cut img", 420, 110)
    cv2.moveWindow("cut img", 55, 20)

    cv2.namedWindow("BM", 0)
    cv2.resizeWindow("BM", 420, 110)
    cv2.moveWindow("BM", 55, 25 + 150)

    cv2.namedWindow("Sampled at intervals", 0)  # interval-sampled depth grid
    cv2.resizeWindow("Sampled at intervals", 420, 110)
    cv2.moveWindow("Sampled at intervals", 55, 25 + 150 * 2)

    cv2.namedWindow("Noise reduction", 0)  # temporally filtered grid
    cv2.resizeWindow("Noise reduction", 420, 110)
    cv2.moveWindow("Noise reduction", 55, 25 + 150 * 3)

    cv2.namedWindow("Sort by distance", 0)  # column-sorted grid
    cv2.resizeWindow("Sort by distance", 420, 110)
    cv2.moveWindow("Sort by distance", 55, 25 + 150 * 4)

    cv2.namedWindow("recut", 0)   # image after trimming both corridor sides
    cv2.resizeWindow("recut", 70, 110)
    cv2.moveWindow("recut", 55 + 640, 20)
    while True:
        try:
            cp = camera_bm.get(1)
        except:
            continue
        imgL = cp.imgL
        imgR = cp.imgR
        stereo = cv2.StereoBM_create(64, 11)  # 64 disparities, 11px block size
        disparity = stereo.compute(imgL, imgR)
        # Normalized 0-255 disparity, for display only.
        disp = cv2.normalize(
            disparity,
            disparity,
            alpha=0,
            beta=255,
            norm_type=cv2.NORM_MINMAX,
            dtype=cv2.CV_8U
        )
        cv2.imshow("cut img", imgL)
        cv2.imshow("recut", imgL[:, 60+200:60+260])
        cv2.imshow("BM", disp)
        # Reproject into 3D space; the z channel holds the distance.
        threeD = cv2.reprojectImageTo3D(
            disparity.astype(np.float32) / 16.,
            camera_configs_back.Q
        )

        # Subsample z every 5 rows / 10 cols -> 22x42 = 924 cells (float32).
        cut_threeD = threeD[0:110:5, 60:480:10, 2]
        cut_threeD = cut_threeD / 10  # convert precision to centimeters
        cut_threeD[cut_threeD > 1400] = 1400  # clamp to the max matching distance
        cut_threeD[cut_threeD < 0] = 1500  # negative depth = invalid
        cut_threeD = cut_threeD.astype(np.uint16)  # round, change dtype
        cut_data = cut_threeD.reshape(924, 1)  # latest frame as a (924, 1) column

        cut_data_show = cut_threeD.copy()  # display copy
        cut_data_show = cut_data_show / 5.88
        cut_data_show = cut_data_show.astype(np.uint8)
        cv2.imshow("Sampled at intervals", cut_data_show)

        cut_data_array = cut_data_array[:, 0:2]  # drop the oldest frame
        cut_data_array = np.concatenate((cut_data, cut_data_array), axis=1)  # prepend the newest frame
        cut_data_sequence = cut_data_array.copy()  # deep copy of the buffer
        cut_data_sequence.sort(axis=1)  # sort the 3 samples of each cell

        cut_data_mask = cut_data_sequence[:, 2]  # per-cell maximum: noise mask
        cut_data_out = cut_data_sequence[:, 1]  # per-cell median over 3 frames
        cut_data_out[cut_data_mask > 1400] = 1500  # invalidate noisy cells
        cut_data_out = cut_data_out.reshape(22, 42)  # back to the sample grid

        cut_data_show = cut_data_out.copy()  # display copy
        cut_data_show = cut_data_show / 5.88
        cut_data_show = cut_data_show.astype(np.uint8)
        cv2.imshow("Noise reduction", cut_data_show)

        cut_data_out.sort(axis=0)  # sort each column by distance

        # Live-demo tweak: blank both sides so only the central corridor
        # columns remain.
        # cut_data_out[:, 0: 19] = 1500
        # cut_data_out[:, 26: 42] = 1500
        cut_data_out[:, 0: 20] = 1500
        cut_data_out[:, 26: 42] = 1500
        deep_data = cut_data_out[4, :]  # final distances: 5th-nearest per column
        deep_data = deep_data.reshape(42)

        cut_data_show = cut_data_out.copy()  # display copy
        cut_data_show = cut_data_show / 5.88
        cut_data_show = cut_data_show.astype(np.uint8)
        cv2.imshow("Sort by distance", cut_data_show)

        # Newest-wins publish: drop any stale row first.
        try:
            data_bm.get_nowait()
        except:
            pass
        data_bm.put(deep_data)

        cv2.waitKey(1)


def _put_latest(q, item):
    """Drop any stale entry in ``q``, then enqueue ``item`` (newest-wins)."""
    try:
        q.get_nowait()
    except Exception:
        pass
    q.put(item)


# Read the two front cameras
def read_camera_front(camera_bm, camera_full, camera_show):
    """Grab stereo frames and fan them out to the consumer processes.

    Args:
        camera_bm: queue of CP objects (rectified L/R crops) for BM_front.
        camera_full: queue of full left frames for pedestrian detection.
        camera_show: queue of full left frames for the display process.

    Runs as long as both cameras stay open.
    """
    # Live cameras; swap in the commented video files below for offline tests.
    camera1 = cv2.VideoCapture(0)  # stereo left camera
    camera2 = cv2.VideoCapture(1)  # stereo right camera
    # path = os.getcwd().replace("\\", "/")
    # camera1 = cv2.VideoCapture(path + "/demo/left001.avi")    # stereo left
    # camera2 = cv2.VideoCapture(path + "/demo/right002.avi")   # stereo right

    # Named constants replace the original magic property ids 3 / 4.
    camera1.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    camera1.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
    camera2.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    camera2.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
    # open_flag: both cameras opened successfully
    open_flag = camera1.isOpened() and camera2.isOpened()
    while open_flag:
        ret1, Left = camera1.read()
        ret2, Right = camera2.read()
        if not (ret1 and ret2):
            time.sleep(0.01)
            continue
        # Crop the band used for stereo matching.
        # CutImg1 = Left[60:180, 50:590]  # y x
        # CutImg2 = Right[60:180, 50:590]  # y x
        CutImg1 = Left[160:280, 50:590]  # y, x
        CutImg2 = Right[160:280, 50:590]  # y, x
        CutImg1 = cv2.cvtColor(CutImg1, cv2.COLOR_BGR2GRAY)
        CutImg2 = cv2.cvtColor(CutImg2, cv2.COLOR_BGR2GRAY)
        # Rectify both crops with the precomputed calibration maps.
        imgL = cv2.remap(
            CutImg1,
            camera_configs_back.left_map1,
            camera_configs_back.left_map2,
            cv2.INTER_LINEAR
        )
        imgR = cv2.remap(
            CutImg2,
            camera_configs_back.right_map1,
            camera_configs_back.right_map2,
            cv2.INTER_LINEAR
        )
        cp = CP()
        cp.imgL = imgL
        cp.imgR = imgR
        _put_latest(camera_bm, cp)      # to BM_front
        _put_latest(camera_full, Left)  # to ObjectDetection_front
        _put_latest(camera_show, Left)  # to showImg_front


# Show the front view: raw image + distance info + pedestrian info
def showImg_front(camera_show, data_bm, data_object, bc_send, show_people):
    """Compose and display the annotated rear-view image; feed the broadcaster.

    Args:
        camera_show: queue of raw left-camera frames.
        data_bm: queue of 42-element distance rows (cm) from BM_front.
        data_object: queue of pedestrian boxes from ObjectDetection_front.
        bc_send: queue receiving the 20-frame median distance in meters.
        show_people: queue receiving a pedestrian-present boolean per batch.

    Also records every composed frame via the module-level writer ``out``.
    Runs until ESC is pressed in the display window.
    """
    # Window name and position (the window actually shows the rear view).
    cv2.namedWindow("Back", cv2.WINDOW_AUTOSIZE)  # create a window named "Back"
    cv2.moveWindow("Back", 600, 180)  # top-left corner at (600, 180)
    # Depth data processing parameters: per region,
    # [slice start, slice end, inner start, inner end, label x-offset].
    DDPP = [
        [0,  10, 0,  7,  0],
        [3,  17, 4, 11,  7],
        [10, 24, 4, 11, 14],
        [17, 31, 4, 11, 21],
        [24, 39, 4, 11, 28],
        [31, 42, 4, 11, 35]
    ]
    left_right_zeroimg = np.zeros((480, 640, 3), dtype=np.uint8)
    DeepData = np.zeros(42, dtype=np.uint16)
    Deepimg = left_right_zeroimg.copy()
    personImg = left_right_zeroimg.copy()
    original_img = left_right_zeroimg.copy()
    objectImg = left_right_zeroimg.copy()
    # Per-frame minimum distances collected over a 20-frame batch
    distance = []
    # Minimum distances within the current frame
    single_distance = []
    # Frame counter
    cnt = 0
    # (disabled) timer for the 20-frame batch
    # time_cnt = 0
    people_cnt = 0
    # Background bitmap; presumably the steering wheel centered -- TODO confirm
    temp = './bg/50.bmp'
    while True:
        BG = cv2.imread(temp)
        try:
            original_img = camera_show.get(1)
        except:
            pass
        Deep_sige = 0  # matching distance = DeepData[int((x-110)/10)]
        try:
            DeepData = data_bm.get_nowait()
            Deep_sige = 1
        except:
            pass
        person_sige = 0  # get person data
        found_filtered = 0
        try:
            found_filtered = data_object.get_nowait()
            person_sige = 1
        except:
            pass
        if person_sige:  # pedestrian data arrived this frame
            # A non-empty found_filtered means pedestrians were detected;
            # bump the per-batch counter.
            if len(found_filtered) > 0:
                people_cnt = people_cnt + 1
            personImg = left_right_zeroimg.copy()
            for person in found_filtered:
                x1, y1, w1, h1 = person
                draw_person(
                    personImg,
                    (
                        int(x1),
                        int(y1),
                        int(w1),
                        int(h1)
                    )
                )
                # x1, y1, w1, h1 = person
                # NOTE: these are rectangle corner coordinates of the detected
                # region, not the person's world position.
        if Deep_sige:
            Deepimg = left_right_zeroimg.copy()  # clear the overlay
            for i in range(6):
                Parameter = DDPP[i]
                cutData = DeepData[Parameter[0]:Parameter[1]]
                deepmin = cutData.min()
                if deepmin < 1400:
                    cutData = cutData[Parameter[2]:Parameter[3]]
                    tryResult = False
                    aims = 0
                    try:
                        aims = cutData.tolist().index(deepmin)
                        tryResult = True
                    except:
                        pass
                    if tryResult:
                        aims = aims + Parameter[4]
                        # x_num = int(aims*10+110)
                        x_num = int(aims*10+115)
                        # convert units to meters
                        showmin = deepmin / 100.
                        if showmin < 3:    # red within 3 m
                            colorTest = (0, 0, 255)
                        elif showmin < 5:  # green within 5 m
                            colorTest = (0, 255, 0)
                        else:              # BGR (255,255,0) beyond 5 m
                            colorTest = (255, 255, 0)
                        cv2.putText(
                            Deepimg,
                            "%.1fM" % showmin,
                            (x_num, 450),
                            cv2.FONT_HERSHEY_SIMPLEX,
                            1.0,
                            colorTest,
                            3
                        )
                        if showmin > 0:
                            single_distance.append(showmin)
        if len(single_distance):
            # Record this frame's minimum distance into the batch list
            distance.append(min(single_distance))
        single_distance.clear()
        cnt = cnt + 1
        if cnt > 20:
            # Sort the per-frame minima collected over the batch
            distance.sort()
            try:
                # Send the median of the 20-frame minima to the broadcaster
                dis = distance[int(len(distance) / 2)]
                bc_send.put(dis)
                print(dis)
            except:
                bc_send.put(0)
                pass
            distance.clear()
            cnt = 0
            # More than 4 of 20 frames with a pedestrian box (>20%)
            # counts as a confirmed pedestrian.
            if people_cnt > 4:
                show_people.put(True)
            else:
                show_people.put(False)
            people_cnt = 0

        if person_sige or Deep_sige:
            objectImg = cv2.resize(Deepimg, (800, 600))
            personImg = cv2.resize(personImg, (800, 600))
            BG = cv2.resize(BG, (800, 600))
            objectImg = cv2.addWeighted(objectImg, 1, personImg, 1, 0)
            objectImg = cv2.addWeighted(objectImg, 1, BG, 1, 0)
        original_img = cv2.flip(src=original_img, flipCode=1)  # mirror the rear view (horizontal flip)
        original_img = cv2.resize(original_img, (800, 600))
        objectImg = cv2.resize(objectImg, (800, 600))
        show_img = cv2.addWeighted(original_img, 1, objectImg, 1, 0)
        show_img1 = cv2.resize(show_img, (640, 480))
        cv2.imshow("Back", show_img1)
        if cv2.waitKey(1) & 0xFF == 27:
            break

        # Save the composed frame to the video file
        show_img1 = cv2.resize(show_img, (1920, 1080))
        out.write(show_img1)


if __name__ == '__main__':
    # Front camera queues
    camera_front_bm = Queue(3)    # stereo pairs, for distance computation
    camera_front_full = Queue(3)  # left frames, for pedestrian detection
    camera_front_show = Queue(3)  # left frames, for the combined display

    # Front target (obstacle) data
    object_data_front = Queue(3)  # pedestrian boxes from the OpenCV HOG detector

    # Front target distance data
    bm_data_front = Queue(3)  # distances computed by the BM algorithm

    # Voice broadcast queue
    bc = Queue()  # distances to announce via mp3 playback

    # Pedestrian detection queue
    people = Queue()  # HOG feature + SVM classification result flags

    # Read the two front cameras
    rc = Process(target=read_camera_front, args=(camera_front_bm, camera_front_full, camera_front_show))
    rc.start()

    # Front depth computation
    bm_front = Process(target=BM_front, args=(camera_front_bm, bm_data_front))
    bm_front.start()

    # Front pedestrian detection.  Fix: consume camera_front_full (which
    # read_camera_front fills for exactly this purpose and which was
    # otherwise unused) instead of camera_front_show, so the detector no
    # longer competes with the display process for the same queue.
    obde_front = Process(target=ObjectDetection_front, args=(camera_front_full, object_data_front))
    obde_front.start()

    # Combined front display
    show_front = Process(target=showImg_front, args=(camera_front_show, bm_data_front, object_data_front, bc, people))
    show_front.start()

    # Voice broadcast.  Fix: distinct name for the process object so it no
    # longer shadows the queue bound to `bc`.
    bc_proc = Process(target=broadcast, args=(bc, people))
    bc_proc.start()
