from ctypes import *
import cv2
import numpy as np
import numpy.ctypeslib as npct
import time
import math
from PIL import Image
import socket
import struct
# ---------------------------------------------------------------------------------------------------------
#   classes        COCO dataset class names; a network output of 0 maps to 'person', and so on in order
# ---------------------------------------------------------------------------------------------------------
# Index-aligned with the detector's class ids: classes[int(class_id)] is the label
# (e.g. class id 0 -> 'person', as used by visualize()).
classes = ('person','bicycle','car','motorbike','aeroplane','bus','train','truck','boat','traffic light',
'fire hydrant','stop sign','parking meter','bench','bird','cat','dog','horse','sheep','cow','elephant',
'bear','zebra','giraffe','backpack','umbrella','handbag','tie','suitcase','frisbee','skis','snowboard',
'sports ball','kite','baseball bat','baseball glove','skateboard','surfboard','tennis racket','bottle',
'wine glass','cup','fork','knife','spoon','bowl','banana','apple','sandwich','orange','broccoli','carrot',
'hot dog','pizza','donut','cake','chair','sofa','pottedplant','bed','diningtable','toilet','tvmonitor',
'laptop','mouse','remote','keyboard','cell phone','microwave','oven','toaster','sink','refrigerator',
'book','clock','vase','scissors','teddy bear','hair drier','toothbrush')


# ---------------------------------------------------------------------------------------------------------
#   Detector()       wraps the TensorRT-accelerated YOLOv5 shared library
# ---------------------------------------------------------------------------------------------------------
class Detector():
    """ctypes wrapper around a TensorRT YOLOv5 shared library.

    The shared library is expected to export:
      Init(model_path)                       -> opaque engine handle (void*)
      Detect(handle, rows, cols, img, out)   -> fills a 50x6 float32 array
      cuda_free(handle)                      -> releases the engine
    """

    def __init__(self, model_path, dll_path):
        """Load the shared library and build the inference engine.

        model_path: path to the serialized .engine file. Accepts str or
            bytes; the C API needs a byte string, so str is encoded here
            (previously callers had to remember to pass b"...").
        dll_path: path to the compiled libyolov5 shared object.
        """
        self.yolov5 = CDLL(dll_path)
        # Detect writes up to 50 detections, 6 floats each:
        # (x, y, w, h, class_id, score) — layout inferred from visualize();
        # confirm against the C implementation.
        self.yolov5.Detect.argtypes = [
            c_void_p, c_int, c_int, POINTER(c_ubyte),
            npct.ndpointer(dtype=np.float32, ndim=2, shape=(50, 6),
                           flags="C_CONTIGUOUS"),
        ]
        self.yolov5.Init.restype = c_void_p
        self.yolov5.Init.argtypes = [c_void_p]
        self.yolov5.cuda_free.argtypes = [c_void_p]
        # Backward-compatible generalization: allow str paths as well as bytes.
        if isinstance(model_path, str):
            model_path = model_path.encode("utf-8")
        self.c_point = self.yolov5.Init(model_path)

    def predict(self, img):
        """Run inference on a uint8 H x W x 3 image array.

        Returns the (N, 6) float32 rows of the fixed-size result buffer
        that are not entirely zero (i.e. the actual detections).
        """
        rows, cols = img.shape[0], img.shape[1]
        res_arr = np.zeros((50, 6), dtype=np.float32)
        self.yolov5.Detect(self.c_point, c_int(rows), c_int(cols),
                           img.ctypes.data_as(POINTER(c_ubyte)), res_arr)
        # Drop the all-zero padding rows the C side left untouched.
        self.bbox_array = res_arr[~(res_arr == 0).all(1)]
        return self.bbox_array

    def free(self):
        """Release the CUDA/TensorRT resources held by the C side."""
        self.yolov5.cuda_free(self.c_point)

# ------------------------------------visualize: drawing helper-----------------------------------------------
#   img                     input image
#   bbox_array              YOLO network predictions, one row per detection
#   middle_x, middle_y      center point of the detected target, used downstream for distance estimation
# -------------------------------------------------------------------------------------------------------------
def visualize(img, bbox_array):
    """Draw the largest detected person on *img* and return its center.

    img: image to draw on (modified in place by cv2.rectangle).
    bbox_array: (N, 6) detection rows of (x, y, w, h, class_id, score).

    Returns (middle_x, middle_y, img); the center is (0, 0) when no
    person was detected.
    """
    # Keep only 'person' detections (class id 0).
    people = [det for det in bbox_array if int(det[4]) == 0]

    # Pick the single person with the largest bounding-box area.
    best = None
    best_area = 0
    for det in people:
        area = det[2] * det[3]
        if area > best_area:
            # BUGFIX: the original stored the confidence score (det[5]) into
            # the running maximum, so areas were compared against a score and
            # the "largest person" selection was effectively arbitrary.
            best_area = area
            best = det

    middle_x, middle_y = 0, 0
    if best is not None:
        x, y, w, h = best[0], best[1], best[2], best[3]
        clas = int(best[4])
        score = best[5]
        cv2.rectangle(img, (int(x), int(y)), (int(x + w), int(y + h)),
                      (0, 0, 225), 2)
        img = cv2.putText(img, str(classes[clas]) + " " + str(round(score, 2)),
                          (int(x), int(y) - 5), cv2.FONT_HERSHEY_SIMPLEX,
                          0.6, (0, 0, 225), 2)
        middle_x = int(np.floor(x + w * 0.5))
        middle_y = int(np.floor(y + h * 0.5))
    return middle_x, middle_y, img

#---------------------------------------------------#
#   Resize the input image without distortion (letterbox)
#---------------------------------------------------#
def resize_image(image, size):
    """Letterbox *image* into *size* without distorting its aspect ratio.

    image: PIL.Image to resize.
    size: (width, height) of the output canvas.

    The image is scaled uniformly to fit inside the canvas and centered
    on a gray (128, 128, 128) background.
    """
    target_w, target_h = size
    src_w, src_h = image.size

    # Uniform scale factor that fits the whole image inside the target box.
    ratio = min(target_w / src_w, target_h / src_h)
    scaled_w = int(src_w * ratio)
    scaled_h = int(src_h * ratio)

    scaled = image.resize((scaled_w, scaled_h), Image.BICUBIC)
    canvas = Image.new('RGB', size, (128, 128, 128))
    canvas.paste(scaled, ((target_w - scaled_w) // 2, (target_h - scaled_h) // 2))
    return canvas

if __name__ == "__main__":
    # Detector accepts str or bytes paths (the C Init() needs bytes internally).
    det = Detector(model_path=b"./yolov5n.engine", dll_path="./libyolov5.so")
    # Open the default camera (-1 lets OpenCV pick the first available device).
    cap = cv2.VideoCapture(-1)
    fps = 0.0

    # UDP client for the JPEG video stream.
    video_client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    video_address = "192.168.43.198"
    video_port = 1234

    # UDP client for the detected target's x coordinate.
    middle_x_client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    middle_x_address = "192.168.43.186"
    middle_x_port = 8095

    try:
        while True:
            # ret is True when a frame was successfully grabbed.
            ret, frame = cap.read()
            if not ret:
                continue
            # Start timing for the running frame-rate estimate.
            t1 = time.time()
            # Crop to the 640x480 region fed to the network. The original
            # code converted BGR->RGB->PIL->array->RGB->BGR around this crop,
            # which nets out to the identity; the crop alone is equivalent.
            frame1 = frame[0:480, 0:640]

            # Run inference and draw the box / class / score overlay.
            result = det.predict(frame1)
            middle_x, middle_y, frame1 = visualize(frame1, result)

            # Running-average FPS overlay.
            fps = (fps + (1. / (time.time() - t1))) / 2
            frame1 = cv2.putText(frame1, "fps= %.2f" % (fps), (0, 40),
                                 cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

            # Downscale and JPEG-encode for streaming; prefix the payload
            # with its byte length packed as an unsigned 64-bit integer.
            frame = cv2.resize(frame1, (320, 240))
            encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 50]
            encoded, buffer = cv2.imencode('.jpg', frame, encode_param)
            message = struct.pack("Q", len(buffer)) + buffer.tobytes()

            try:
                video_client.sendto(message, (video_address, video_port))
                middle_x_client.sendto(str(middle_x).encode(),
                                       (middle_x_address, middle_x_port))
                time.sleep(0.05)  # throttle the UDP stream
            except Exception as e:
                # Best-effort streaming: log the send failure and keep going.
                print(e)
            # Press 'q' to quit (only effective while a cv2 window has focus).
            if cv2.waitKey(1) & 0xff == ord('q'):
                break
    finally:
        # Release CUDA/TensorRT, camera, sockets and windows even if the
        # loop above raises — the original leaked them on any exception.
        det.free()
        cap.release()
        video_client.close()
        middle_x_client.close()
        cv2.destroyAllWindows()
