import pickle
from datetime import datetime
import socket
import cv2
from threading import Thread
from queue import Queue
from playsound import playsound
from ultralytics import YOLO
import argparse
import torch
import time
from loguru import logger
import numpy as np
from ultralytics.yolo.utils import LOGGER
import Jetson.GPIO as GPIO


class ServerClass():
    """TCP server that streams JPEG-encoded frames plus detection results.

    Each message is a pickled ``[encoded_frame, result]`` list, preceded by a
    fixed 16-byte ASCII header carrying the payload length so the client can
    frame the byte stream. Frames are pulled from the module-level queue ``q``
    (filled by the detector thread).
    """

    def __init__(self):
        # Address to bind — must be the Jetson Nano's own address.
        self.addr = ('192.168.1.113', 8081)
        # self.addr = ('192.168.1.101', 8082)  # PC address (for debugging)
        # self.addr = ('192.168.43.21', 8081)  # P30
        # JPEG quality 50 keeps per-frame payloads small for the network link.
        self.encode_param = [cv2.IMWRITE_JPEG_QUALITY, 50]
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # TCP socket
        # True while a client is connected; polled by the detector thread.
        self.connect_flag = False

    # Worker-thread entry point: accept clients and stream frames to them.
    def accept_client(self):
        server_socket = self.socket
        server_socket.bind(self.addr)  # bind local IP and port
        server_socket.listen(2)  # backlog of at most 2 pending clients
        print("等待client连接……")
        # Outer loop: wait for the next client connection.
        while True:
            new_server_socket, addr = server_socket.accept()
            connect_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            print('\n客户端 {} 连接成功，时间：{}'.format(addr, connect_time))
            self.connect_flag = True
            # Inner loop: one frame + detection result per iteration.
            while True:
                frame, result = q.get()
                # Encode the frame as JPEG to keep the payload compact.
                _, encoded_frame = cv2.imencode('.jpg', frame, self.encode_param)
                data_string = pickle.dumps([encoded_frame, result])
                try:
                    # 16-byte fixed-width header with the payload size.
                    new_server_socket.sendall(str(len(data_string)).encode("utf-8").ljust(16))
                    # BUG FIX: use sendall() here too — a bare send() may
                    # transmit only part of a large payload, desynchronizing
                    # the client's length-prefixed framing.
                    new_server_socket.sendall(data_string)
                except (ConnectionResetError, ConnectionAbortedError, BrokenPipeError):
                    # Client disconnected: clean up and return to accept().
                    new_server_socket.close()
                    disconnect_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                    print('\n客户端 {} 断开连接，时间：{}'.format(addr, disconnect_time))
                    self.connect_flag = False
                    break

    def get_connect_flag(self):
        """Return True while a client is connected (read by the detector thread)."""
        return self.connect_flag

# class MyDetector_yolov8():
#     def __init__(self):
#         self.model = YOLO("weights/best.pt")
#         # 设置相机分辨率
#         self.cap = cv2.VideoCapture(0)
#         self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
#         self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
#         self.class_results = torch.tensor([0])
#
#     def inference(self,img):
#         results = self.model.predict(source=img, stream=False, post_img=True, show=True)  # return a list
#         boxes = results[0].boxes
#         post_img = results[1]
#         return boxes.cls, post_img
#
#     def detect(self):
#         while self.cap.isOpened():
#             rect, frame = self.cap.read()
#             # 推理
#             start = time.time()
#             self.class_results, frame = self.inference(frame)
#             use_time = time.time() - start
#             print("\rfps:", 1 / use_time, end="")
#
#             # client端连接上 and q未满，才往q中放入frame
#             if serverclass.get_connect_flag and not q.full():
#             # if serverclass.get_connect_flag:
#                 q.put(frame)
#
#     # 语音报警
#     def voice_alarm(self):
#         while True:
#             if torch.any(torch.eq(self.class_results, 4)):  # 判断是否存在4对应“None”(head)
#                 time.sleep(0.5)     # 防止误判
#                 if torch.any(torch.eq(self.class_results, 4)):
#                     playsound('voice/Alert.mp3')  # 先放一个短音频，防止直接播放长音频时前面的字不播放出来
#                     playsound('voice/Please.mp3')

class MyDetector_TensorRT():
    """Runs a TensorRT-exported YOLO model on webcam frames and pulses a
    GPIO-driven voice alarm when a bare head (class id 4) is detected.

    ``detect()`` runs on the main thread; ``voice_alarm()`` runs on a daemon
    thread. The latest detections are shared through ``self.results``, a
    tensor of rows ``[x, y, w, h, conf, class]`` (empty ``(0, 6)`` tensor
    when nothing is detected).
    """

    def __init__(self):
        self.detector = YOLO("weights/fp16/best.engine")
        # Camera at 640x480 to match the inference input size.
        self.cap = cv2.VideoCapture(0)
        self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
        self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
        # Placeholder until the first inference completes.
        self.results = torch.tensor([0])
        # Voice-alarm module init: active-low trigger on BCM pin 21.
        self.pin = 21
        GPIO.setmode(GPIO.BCM)               # use BCM pin numbering
        GPIO.setup(self.pin, GPIO.OUT)
        GPIO.output(self.pin, GPIO.HIGH)     # idle high (pulled up)

    def detect(self):
        """Main-thread loop: grab a frame, run inference, draw FPS, enqueue."""
        while self.cap.isOpened():
            grabbed, frame = self.cap.read()
            # BUG FIX: skip failed grabs instead of feeding None into predict().
            if not grabbed:
                continue
            detected_results = self.detector.predict(imgsz=(480, 640), source=frame, verbose=False, stream=False,
                                                     data="data/GDUT-HWD_trainval.yaml", conf=0.5)
            # NOTE(review): indices [0]/[2] assume a customized ultralytics
            # fork returning (results, post_img, use_time) — confirm.
            self.results = detected_results[0].boxes.boxes
            use_time = detected_results[2]
            fps = 1 / sum(use_time)
            cv2.putText(frame, "fps:{:.0f}".format(fps), (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
            print("\rfps:{:.0f}".format(fps), end="")
            result_list = [frame, self.results]

            # Enqueue only when a client is connected and the queue has room.
            # BUG FIX: call get_connect_flag() — the bare method reference was
            # always truthy, so frames were queued with no client connected.
            if serverclass.get_connect_flag() and not q.full():
                q.put(result_list)

    def _head_detected(self):
        """True when any current detection row has class id 4 ('none'/head)."""
        boxes = self.results
        # Placeholder (1-D) or empty (0, 6) tensor means nothing detected.
        if boxes.ndim != 2 or boxes.shape[0] == 0:
            return False
        # BUG FIX: test only the class column (index 5). Comparing the whole
        # tensor with 4 also matched coordinates/confidences equal to 4.0,
        # falsely triggering the alarm.
        return bool(torch.any(boxes[:, 5] == 4))

    # Voice alarm
    def voice_alarm(self):
        """Daemon-thread loop: pulse the alarm pin while a head detection persists."""
        while True:
            try:
                if self._head_detected():
                    time.sleep(0.5)     # debounce against one-frame false positives
                    if self._head_detected():
                        # The voice module is triggered by a low pulse.
                        GPIO.output(self.pin, GPIO.LOW)
                        time.sleep(0.2)
                        # Release back to the idle high level.
                        GPIO.output(self.pin, GPIO.HIGH)
                        time.sleep(0.2)
            except (TypeError, IndexError):
                # results may briefly hold a partial write from the detect thread
                pass

if __name__ == "__main__":
    # Bounded queue decouples the inference loop from the network sender;
    # capacity 5 caps the latency backlog when the client drains slowly.
    q = Queue(5)
    serverclass = ServerClass()
    detector = MyDetector_TensorRT()

    # Worker thread 1: accept client connections and stream frames.
    # BUG FIX: the bare `th.setDaemon` attribute access did nothing — pass
    # daemon=True so these threads die when the main thread exits.
    th_server = Thread(target=serverclass.accept_client, daemon=True)
    th_server.start()

    # Worker thread 2: GPIO voice alarm.
    th_voice_alarm = Thread(target=detector.voice_alarm, daemon=True)
    th_voice_alarm.start()

    # Main thread: inference loop.
    detector.detect()


