from ctypes import *
import cv2
import numpy as np
import numpy.ctypeslib as npct
import time
import math
from PIL import Image
import os
import socket
import struct
print("cv2 version is:",cv2.__version__)
# -----------------------------------Basic stereo-camera parameters-------------------------------
#   left_camera_matrix          intrinsic matrix of the left camera
#   right_camera_matrix         intrinsic matrix of the right camera
#
#   left_distortion             distortion coefficients of the left camera, format (K1,K2,P1,P2,0)
#   right_distortion            distortion coefficients of the right camera
# -------------------------------------------------------------------------------------------------
# ------------------------------------14mm lens parameters (disabled; kept for reference)----------
# Intrinsics
'''
left_camera_matrix = np.array([[441.920143713546,
                                1.71098459439999,
                                318.986044024465],
                                [0,
                                442.855952256905,
                                240.70001703707],
                                [0,0,1]])
right_camera_matrix = np.array([[442.457735613416,
                                1.55198295736747,
                                355.86648505518],
                                [0,
                                442.614123881255,
                                232.090345095048],
                                [0,0,1]])
#畸变系数
left_distortion = np.array([[-0.0370002453590559,
                            -0.0950589847060166,
                            -0.000396772492469185,
                            0.00116581708229662,
                            0]])
right_distortion = np.array([[-0.0613839321795534,
                            -0.00344689485865854,
                            -0.00175955788904397,
                            0.00150990712197183,
                            0]])
#旋转矩阵
R = np.array([[0.999292228249207,
                0.00256571089798283,
                -0.03752945094618],
                [-0.002178114172415,
                0.999943909264839,
                0.0103650442740671],
                [0.0375539395987383,
                -0.0102759647595454,
                0.999241765624754]])
#平移矩阵
T = np.array([[-63.1614048101981], 
                  [0.433775751235972], 
                  [-1.52786869957642]])
'''
# ------------------------------------26.5mm lens parameters---------------------------------------
# Intrinsics (values come from an offline stereo calibration)

left_camera_matrix = np.array([[335.73514791566,
                                -0.731874511921937,
                                322.909744272018],
                                [0,
                                335.55542827028,
                                243.380676402428],
                                [0,0,1]])
right_camera_matrix = np.array([[336.040359146416,
                                -0.314558518222512,
                                345.151908479252],
                                [0,
                                335.596735734494,
                                235.933100504815],
                                [0,0,1]])
# Distortion coefficients, format (K1, K2, P1, P2, 0)
left_distortion = np.array([[-0.0671729884111311,
                            -0.00226867715256577,
                            0.00159660436315177,
                            0.00229071232872235,
                            0]])
right_distortion = np.array([[-0.0681254193650723,
                            0.00284849515254842,
                            0.00137186177789993,
                            0.00172620053631537,
                            0]])
# Rotation matrix between the two cameras (from stereo calibration)
R = np.array([[0.999365346521295,
                0.000290377473121344,
                -0.0356204976564704],
                [-0.0000372010398693062,
                0.999974736715963,
                0.0071080620370507],
                [0.0356216617868137,
                -0.00710222576119855,
                0.999340110073034]])
# Translation vector between the cameras; units appear to be millimetres
# (3-D distances derived from Q are divided by 1000 to get metres) — TODO confirm.
T = np.array([[-60.5633077641667], 
                  [0.0346502867236554], 
                  [-1.33159248369833]])

# Image size (width, height) used for rectification; each stereo half is 640x480.
size = (640, 480)

# Compute the rectification transforms and the disparity-to-depth matrix Q.
R1, R2, P1, P2, Q, validPixROI1, validPixROI2 = cv2.stereoRectify(left_camera_matrix, left_distortion,
                                                                  right_camera_matrix, right_distortion, size, R,
                                                                  T)
# Rectification lookup maps: relate points in the raw images to points in the rectified images.
left_map1, left_map2 = cv2.initUndistortRectifyMap(left_camera_matrix, left_distortion, R1, P1, size, cv2.CV_16SC2)
right_map1, right_map2 = cv2.initUndistortRectifyMap(right_camera_matrix, right_distortion, R2, P2, size, cv2.CV_16SC2)
print(Q)

# ---------------------------------------------------------------------------------------------------------
#   classes        COCO class names; a network class id of 0 maps to 'person', and so on in order.
# ---------------------------------------------------------------------------------------------------------
classes = ('person','bicycle','car','motorbike','aeroplane','bus','train','truck','boat','traffic light',
'fire hydrant','stop sign','parking meter','bench','bird','cat','dog','horse','sheep','cow','elephant',
'bear','zebra','giraffe','backpack','umbrella','handbag','tie','suitcase','frisbee','skis','snowboard',
'sports ball','kite','baseball bat','baseball glove','skateboard','surfboard','tennis racket','bottle',
'wine glass','cup','fork','knife','spoon','bowl','banana','apple','sandwich','orange','broccoli','carrot',
'hot dog','pizza','donut','cake','chair','sofa','pottedplant','bed','diningtable','toilet','tvmonitor',
'laptop','mouse','remote','keyboard','cell phone','microwave','oven','toaster','sink','refrigerator',
'book','clock','vase','scissors','teddy bear','hair drier','toothbrush')


# ---------------------------------------------------------------------------------------------------------
#   Detector()    thin ctypes wrapper around a TensorRT-accelerated YOLOv5 shared library
# ---------------------------------------------------------------------------------------------------------
class Detector():
    def __init__(self, model_path, dll_path):
        """Load the shared library at `dll_path` and initialise the engine.

        `model_path` must be a bytes path (e.g. b"./yolov5n.engine") because
        it is handed straight to the C `Init` entry point.
        """
        self.yolov5 = CDLL(dll_path)
        # Declare the C signatures before the first call so ctypes marshals
        # the arguments correctly. Detect fills a fixed 50x6 float32 buffer.
        self.yolov5.Detect.argtypes = [
            c_void_p, c_int, c_int, POINTER(c_ubyte),
            npct.ndpointer(dtype=np.float32, ndim=2, shape=(50, 6), flags="C_CONTIGUOUS"),
        ]
        self.yolov5.Init.restype = c_void_p
        self.yolov5.Init.argtypes = [c_void_p]
        self.yolov5.cuda_free.argtypes = [c_void_p]
        # Opaque handle to the C-side detector state.
        self.c_point = self.yolov5.Init(model_path)

    def predict(self, img):
        """Run detection on a BGR uint8 image; return only the non-empty rows.

        Each row appears to be [x, y, w, h, class_id, score] — TODO confirm
        against the shared library's Detect() contract.
        """
        rows, cols = img.shape[0], img.shape[1]
        out = np.zeros((50, 6), dtype=np.float32)
        self.yolov5.Detect(self.c_point, c_int(rows), c_int(cols),
                           img.ctypes.data_as(POINTER(c_ubyte)), out)
        # Drop all-zero (unused) slots from the fixed-size result buffer.
        self.bbox_array = out[~(out == 0).all(1)]
        return self.bbox_array

    def free(self):
        """Release the CUDA resources held by the C side."""
        self.yolov5.cuda_free(self.c_point)

# ------------------------------------visualize----------------------------------------------------
#   img                      input image (BGR)
#   bbox_array               YOLO predictions, rows of [x, y, w, h, class_id, score]
#   middle_x, middle_y       centre pixel of the chosen detection, used to read its distance
# -------------------------------------------------------------------------------------------------
def visualize(img, bbox_array):
    """Annotate the nearest detected person and return its centre and distance.

    Depends on the module-level ``threeD`` reprojection map (refreshed every
    frame in the main loop) for per-pixel 3-D coordinates in millimetres.
    Returns (middle_x, middle_y, distance_in_metres, img);
    (0, 0, 0, img) when no person is detected.
    """
    # Keep only detections whose class id is 0 ("person" in COCO ordering).
    people_res = [det for det in bbox_array if int(det[4]) == 0]

    person_res = []
    distance_min = 10000
    middle_x, middle_y, distance = 0, 0, 0
    for temp in people_res:
        middle_x = int(np.floor(temp[0] + temp[2] * 0.5))
        middle_y = int(np.floor(temp[1] + temp[3] * 0.5))
        distance = math.sqrt(threeD[middle_y][middle_x][0] ** 2 +
                             threeD[middle_y][middle_x][1] ** 2 +
                             threeD[middle_y][middle_x][2] ** 2)
        distance = distance / 1000.0  # mm -> m
        # Keep the single nearest person.
        if distance < distance_min:
            distance_min = distance
            person_res = [temp]

    if not person_res:
        return middle_x, middle_y, distance, img

    person_res = person_res[0]
    # np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # int() is the drop-in replacement (values here are non-negative).
    middle_x = int(person_res[0] + person_res[2] * 0.5)
    middle_y = int(person_res[1] + person_res[3] * 0.5)
    width = int(person_res[2])
    # Clamp the scan window to the image columns: the original could produce
    # a negative x_left, silently wrapping around via negative indexing.
    x_left = max(int(middle_x - 0.5 * width), 0)
    x_right = min(int(middle_x + 0.5 * width), 640)
    # Sample every 3rd column across the box at the centre row and keep the
    # minimum distance — more robust than trusting the single centre pixel.
    dis = 1000000
    for i in range(x_left + 2, x_right - 2, 3):
        i_distance = math.sqrt(threeD[middle_y][i][0] ** 2 +
                               threeD[middle_y][i][1] ** 2 +
                               threeD[middle_y][i][2] ** 2)
        i_distance = i_distance / 1000.0  # mm -> m
        if i_distance < dis:
            dis = i_distance
    distance = dis
    cv2.rectangle(img, (int(person_res[0]), int(person_res[1])),
                  (int(person_res[0] + person_res[2]), int(person_res[1] + person_res[3])),
                  (0, 0, 225), 2)
    img = cv2.putText(img, "score:%.2f" % person_res[5],
                        (int(person_res[0]), int(person_res[1]) - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 225), 2)
    img = cv2.putText(img, "distance:%.2f" % distance,
                        (int(person_res[0]), int(person_res[1]) + 15), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 225), 2)
    return middle_x, middle_y, distance, img
def puttext(img, text):
    """Draw `text` in green near the top-left corner of `img` and return it.

    The text is drawn in place; the image is also returned because callers
    in the main script use the return value.
    """
    font = cv2.FONT_HERSHEY_SIMPLEX
    font_scale = 1
    font_color = (0, 255, 0)  # green (BGR)
    thickness = 4
    # Fixed anchor point. The original also called cv2.getTextSize here but
    # never used the result; that dead computation has been removed.
    x = 10
    y = 60
    cv2.putText(img, text, (x, y), font, font_scale, font_color, thickness)
    return img
#---------------------------------------------------#
#   Letterbox-resize the input image without distortion
#---------------------------------------------------#
def resize_image(image, size):
    """Resize a PIL image to `size`, preserving aspect ratio.

    The image is scaled to fit inside `size` and centred on a grey
    (128, 128, 128) canvas so the output has exactly the requested size.
    """
    src_w, src_h = image.size
    dst_w, dst_h = size
    ratio = min(dst_w / src_w, dst_h / src_h)
    new_w = int(src_w * ratio)
    new_h = int(src_h * ratio)

    scaled = image.resize((new_w, new_h), Image.BICUBIC)
    canvas = Image.new('RGB', size, (128, 128, 128))
    canvas.paste(scaled, ((dst_w - new_w) // 2, (dst_h - new_h) // 2))
    return canvas

def client(address, port, data):
    """Send the string `data` (UTF-8) to a TCP server at (address, port).

    Opens a short-lived connection per call. The `with` block guarantees the
    socket is closed even if connect/send raises — the original leaked one
    socket per call. `sendall` replaces `send` so partial writes are retried.
    """
    with socket.socket() as my_client:
        my_client.connect((address, port))
        my_client.sendall(data.encode(encoding='utf-8'))
if __name__ == "__main__":
    # Load the TensorRT YOLOv5 engine through the C shared-library wrapper.
    det = Detector(model_path=b"./yolov5n.engine",dll_path="./libyolov5.so")  # b'' is needed
    print("finish load det")
    # Open the camera (-1 = first available); both stereo halves arrive
    # side by side in a single 1280x480 frame.
    cap = cv2.VideoCapture(-1)
    print("open camera")
    WIN_NAME = 'Deep disp'
    cv2.namedWindow(WIN_NAME, cv2.WINDOW_AUTOSIZE)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
    width = 640
    height = 480
    print("width:%d,height:%d"%(width,height))
    # Central 100x100 ROI in one stereo half, used for HSV colour learning.
    LeftTop = (int(width / 2) - 50,int(height / 2) - 50)
    RightBottom = (int(width / 2) + 50,int(height / 2) + 50)
    # UDP socket used to stream JPEG-compressed preview frames to a viewer.
    video_client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    video_address = "192.168.43.198"
    video_port = 1234

    wait_begin_time = time.time()
    # Aiming/countdown phase (the on-screen countdown below runs for 10 seconds).
    script_path = os.path.dirname(os.path.abspath(__file__))
    path = script_path + "/hsv_data.txt"
    m = input("whether use new hsv data?(yes or no)")
    if m == "yes":
        os.remove(path)
    if not os.path.exists(path):
        print("file is not exist")
        while True:
            ret, img = cap.read()
            if ret:
                frame1 = img[0:, 0:640]
                frame2 = img[0:, 640:1280]
                wait_end_time = time.time()
                img = puttext(frame2,"waiting last time:%.2f"%(10 - (wait_end_time - wait_begin_time)))
                cv2.rectangle(frame2, LeftTop, RightBottom, (0,255,0),2, 4)

                # Stream a downscaled JPEG preview over UDP:
                # 8-byte length header followed by the JPEG payload.
                frame = cv2.resize(frame2, (320, 240))
                encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 50]
                encoded, buffer = cv2.imencode('.jpg', frame, encode_param)
                message = struct.pack("Q", len(buffer)) + buffer.tobytes()
                try:
                    video_client.sendto(message, ('192.168.43.198',1234))
                except socket.error as e:
                    print(f"Socket error: {e}")

                if wait_end_time - wait_begin_time > 10:
                    break
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break
                #img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
                cv2.imshow("frame2",frame2)
        cv2.destroyAllWindows()
        # Learn HSV colour thresholds: sample the ROI over 100 frames.
        learn_begin_time = time.time()
        H = []
        S = []
        V = []
        for i in range(100):
            t1 = time.time()
            ret,img = cap.read()
            if ret:
                frame1 = img[0:, 0:640]
                frame2 = img[0:, 640:1280]
                frame2 = cv2.cvtColor(frame2,cv2.COLOR_BGR2HSV)
                roi = frame2[LeftTop[1]:RightBottom[1],LeftTop[0]:RightBottom[0]]
                h, s, v = cv2.split(roi)
                H.append(h)
                S.append(s)
                V.append(v)
                time.sleep(0.00000001)
                t2 = time.time()
                fps = 1 / (t2 - t1)
                frame2 = cv2.cvtColor(frame2,cv2.COLOR_HSV2BGR)
                frame2 = puttext(frame2,"learning color -- FPS:%.2f"%fps)
                cv2.rectangle(frame2, LeftTop, RightBottom, (0,255,0),2, 4)
                frame = cv2.resize(frame2, (320, 240))
                encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 50]
                encoded, buffer = cv2.imencode('.jpg', frame, encode_param)
                message = struct.pack("Q", len(buffer)) + buffer.tobytes()
                try:
                    video_client.sendto(message, ('192.168.43.198',1234))
                except socket.error as e:
                    print(f"Socket error: {e}")
                #frame2 = cv2.cvtColor(frame2,cv2.COLOR_BGR2GRAY)
                cv2.imshow("frame2",frame2)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break
        cv2.destroyAllWindows()
        # Flatten the collected channel samples into 1-D arrays.
        H_data = np.ravel(H)
        S_data = np.ravel(S)
        V_data = np.ravel(V)
        learn_end_time = time.time()
        print("learning use time:%.2f,one circle use time:%.5f"%((learn_end_time - learn_begin_time),(learn_end_time - learn_begin_time)/100))
        # Take the 5th/95th percentiles per channel as robust colour bounds.
        low_threshold = 5
        high_threshold = 95
        learn_start_time = time.time()
        H_low = np.percentile(H_data, low_threshold)
        H_high = np.percentile(H_data, high_threshold)
        S_low = np.percentile(S_data, low_threshold)
        S_high = np.percentile(S_data, high_threshold)
        V_low = np.percentile(V_data, low_threshold)
        V_high = np.percentile(V_data, high_threshold)
        # Persist the learned thresholds: one "low high" pair per line (H, S, V).
        data = [[H_low, H_high], [S_low, S_high], [V_low, V_high]]
        with open(path, 'w') as file:
            for row in data:
                line = ' '.join(str(x) for x in row)
                file.write(line + '\n')
        print("finish set up data")
    else:
        print("file is exist")
        # Reload previously learned thresholds (same "low high" per-line format).
        with open(path, 'r') as file:
            lines = file.readlines()
            H_low, H_high = map(float, lines[0].strip().split())
            S_low, S_high = map(float, lines[1].strip().split())
            V_low, V_high = map(float, lines[2].strip().split())
    H_threshold = [H_low, H_high]
    S_threshold = [S_low, S_high]
    V_threshold = [V_low, V_high]
    print(H_threshold)
    print(S_threshold)
    print(V_threshold)
    print("begin detect")
    # Lower/upper HSV threshold bounds for obstacle masking.
    lower = np.array([H_threshold[0], S_threshold[0], V_threshold[0]])
    upper = np.array([H_threshold[1], S_threshold[1], V_threshold[1]])
    # 3. Main detection loop: read video and process each frame.
    fps = 0.0
    ret, frame = cap.read()
    # CUDA block-matching stereo and an async stream for disparity computation.
    stereo = cv2.cuda.createStereoBM(numDisparities=128, blockSize=15)
    stream = cv2.cuda_Stream()
    print("finish setup stereo and stream")
    f = input("whether use new ip?(yes or no)")
    if f == 'yes':
        address = input("please enter the server ip:")
        port = input("please enter the server port:")
        port = int(port)
    else:
        address = '192.168.43.198'
        port = 8095
    print("finish connect to server")
    while True:
        ret, frame = cap.read()
        if ret:
            # ret is True when a frame was successfully read.
            # NOTE(review): cap.read() is called again here, so every other
            # frame is discarded — confirm this is intentional.
            ret, frame = cap.read()
            # Start the timer used for the FPS estimate.
            t1 = time.time()
            img_color = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            frame1 = frame[0:480, 0:640]
            frame2 = frame[0:480, 640:1280]  # split the side-by-side stereo image
            #-------------------------------------Colour thresholding----------------------------------------------
            hsv_frame1 = cv2.cvtColor(frame1, cv2.COLOR_BGR2HSV)
            hsv_frame2 = cv2.cvtColor(frame2, cv2.COLOR_BGR2HSV)
            # Binary masks of pixels inside the learned HSV range.
            mask1 = cv2.inRange(hsv_frame1, lower, upper)
            mask2 = cv2.inRange(hsv_frame2, lower, upper)
            # Find contours of the masked regions.
            contours1, _ = cv2.findContours(mask1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            contours2, _ = cv2.findContours(mask2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            mask1 = cv2.cvtColor(mask1, cv2.COLOR_GRAY2BGR)
            mask2 = cv2.cvtColor(mask2, cv2.COLOR_GRAY2BGR)
            frame1_copy = frame1.copy()
            frame2_copy = frame2.copy()
            x1,x2,y1,y2,w1,w2,h1,h2 = 0,0,0,0,0,0,0,0
            # Largest contour in frame1 (area must exceed 100 px to count).
            max_area = 100
            flag = True
            if contours1 is not None:
                for contour1 in contours1:
                    area1 = cv2.contourArea(contour1)
                    if area1 > max_area:
                        max_area = area1
                        x1, y1, w1, h1 = cv2.boundingRect(contour1)
                if max_area > 100:      
                    cv2.rectangle(frame1_copy, (x1, y1), (x1 + w1, y1 + h1), (0, 255, 0), 2)
                else:
                    flag = False
                    #cv2.rectangle(mask1, (x1, y1), (x1 + w1, y1 + h1), (0, 255, 0), 2)
            # Largest contour in frame2.
            max_area = 100
            if contours2 is not None:
                for contour2 in contours2:
                    area2 = cv2.contourArea(contour2)
                    if area2 > max_area:
                        max_area = area2
                        x2, y2, w2, h2 = cv2.boundingRect(contour2)
                if max_area > 100:      
                    cv2.rectangle(frame2_copy, (x2, y2), (x2 + w2, y2 + h2), (0, 255, 0), 2)
                else:
                    flag = False
                    #cv2.rectangle(mask2, (x2, y2), (x2 + w2, y2 + h2), (0, 255, 0), 2)
            # ------------------------------------CUDA stereo ranging----------------------------------------------
            # Upload both halves to the GPU.
            imgL_cuda = cv2.cuda_GpuMat()
            imgR_cuda = cv2.cuda_GpuMat()
            imgL_cuda.upload(frame1)
            imgR_cuda.upload(frame2)
            imgL_cuda = cv2.cuda.cvtColor(imgL_cuda, cv2.COLOR_BGR2GRAY)
            imgR_cuda = cv2.cuda.cvtColor(imgR_cuda, cv2.COLOR_BGR2GRAY)
            # Compute the disparity map.
            disparity_cuda = stereo.compute(imgL_cuda, imgR_cuda,stream=stream)
            disparity = disparity_cuda.download()
            # Normalise the disparity to 8-bit for display.
            disp = cv2.normalize(disparity, disparity, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
            # Reproject disparity to per-pixel 3-D coordinates via Q.
            threeD = cv2.reprojectImageTo3D(disparity, Q, handleMissingValues=True)
            # StereoBM disparities look fixed-point (scaled by 16), hence the
            # rescale here — TODO confirm the scaling convention.
            threeD = threeD * 16
            # ------------------------------------Object detection----------------------------------------------
            # Convert BGR to RGB.
            frame1 = cv2.cvtColor(frame1, cv2.COLOR_BGR2RGB)
            # Convert to a PIL Image.
            frame1 = Image.fromarray(np.uint8(frame1))
            frame1_shape = np.array(np.shape(frame1)[0:2])
            # Adjust size/colour channels so the frame suits YOLO inference.
            # frame1 = resize_image(frame1,(640,480))
            frame1 = cv2.cvtColor(np.array(frame1), cv2.COLOR_RGB2BGR)
            # Run inference.
            result = det.predict(frame1)
            #print("result:",result)
            # Draw the nearest person with class, distance, and confidence.
            # NOTE(review): visualize()'s third return value is a distance in
            # metres, not a confidence score, despite the variable name.
            person_x,person_y,score = 0,0,0
            person_x,person_y,score,frame1 = visualize(frame1, result)
            # Draw obstacles found by colour thresholding.
            x_mean,y_mean,distance = 0,0,0
            if flag:
                # Average the boxes from both stereo halves to get a centre.
                x_mean = int((x1+x1+w1+x2+x2+w2)/4)
                y_mean = int((y1+y1+h1+y2+y2+h2)/4)
                cv2.circle(frame1_copy,(x_mean,y_mean),radius=10,color=(255,0,0),thickness=-1)   
                cv2.circle(frame2_copy,(x_mean,y_mean),radius=10,color=(255,0,0),thickness=-1)
                distance = math.sqrt(threeD[y_mean][x_mean][0] ** 2 + threeD[y_mean][x_mean][1] ** 2 + threeD[y_mean][x_mean][2] ** 2)
                distance = distance / 1000
                cv2.putText(frame1_copy, "%.2fm"%distance, (x_mean, y_mean), cv2.FONT_HERSHEY_SIMPLEX,1, (0, 255, 0), 2)
                cv2.putText(frame2_copy, "%.2fm"%distance, (x_mean, y_mean), cv2.FONT_HERSHEY_SIMPLEX,1, (0, 255, 0), 2)
                #print("obstacles distance:%.2fm"%distance)
            img_show = cv2.hconcat([frame1_copy, frame2_copy])
            cv2.imshow("img_show", img_show)
            # Update the FPS estimate (running average with the new sample).
            fps = (fps + (1. / (time.time() - t1))) / 2
            cv2.circle(frame1,(x_mean,y_mean),radius=5,color=(255,255,0),thickness=-1)
            cv2.rectangle(frame1, (x1, y1), (x1 + w1, y1 + h1), (0, 255, 0), 2)
            frame1 = cv2.putText(frame1, "fps= %.2f" % (fps), (0, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
            cv2.imshow("frame1", frame1)
            dis_color = cv2.applyColorMap(disp, 2)
            cv2.imshow(WIN_NAME, dis_color)  # show the colourised depth map
            # Pack results as CSV text: person x,y,distance then obstacle x,y,distance.
            person_data = "%d,%d,%.2f"%(person_x,person_y,score)
            obstracles_data = "%d,%d,%.2f"%(x_mean,y_mean,distance)
            data = person_data + "," + obstracles_data
            print("person:",person_data,"obstracles:",obstracles_data)
            cv2.putText(frame1, "%.2fm"%distance, (x_mean, y_mean), cv2.FONT_HERSHEY_SIMPLEX,1, (0, 255, 0), 2)
            # Stream the annotated preview over UDP (length header + JPEG).
            frame = cv2.resize(frame1, (320, 240))
            encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 50]
            encoded, buffer = cv2.imencode('.jpg', frame, encode_param)
            message = struct.pack("Q", len(buffer)) + buffer.tobytes()
            try:
                video_client.sendto(message, ('192.168.43.198',1234))
            except socket.error as e:
                print(f"Socket error: {e}")
            # Push the result text to the TCP server.
            # NOTE(review): this uses a hardcoded IP/port, ignoring the
            # `address`/`port` values collected above — confirm intentional.
            try:
                client('192.168.43.186',8095,data)
            except socket.error as e:
                print(f"Socket error: {e}")
            # Quit when 'q' is pressed.
            if cv2.waitKey(1) & 0xff == ord('q'):
                break
    # 4. Release resources.
    det.free()
    cap.release()
    # 5. Close all windows.
    cv2.destroyAllWindows()
