import os
import sys
import threading
from datetime import datetime
import cv2
import numpy as np
# import gcg_interface
import math
import time
from PyQt5 import QtCore
from PyQt5.QtGui import QPixmap, QImage
from PyQt5.QtWidgets import QLabel, QApplication, QWidget
from PyQt5.QtCore import Qt
from MvCameraControl_class import *
import ctypes
import platform



class Mainwindow(QWidget):
    """Main display window: two side-by-side labels for the annotated frame
    and the binary (thresholded) frame produced by the worker thread."""

    def __init__(self, save_pic):
        super().__init__()
        # Worker thread whose modes are toggled from keyPressEvent.
        self.save_pic = save_pic
        self.initUI()

    def initUI(self):
        """Create and place the two preview labels."""
        self.setObjectName("MainWindow")
        self.resize(1600, 900)  # wide enough to fit both images side by side
        self.label1 = QLabel(self)
        self.label1.setAlignment(Qt.AlignCenter)
        self.label1.setGeometry(QtCore.QRect(70, 70, 561, 451))
        self.label2 = QLabel(self)
        self.label2.setAlignment(Qt.AlignCenter)
        self.label2.setGeometry(QtCore.QRect(800, 70, 561, 451))

    def closeEvent(self, event) -> None:
        """Release the camera (stop grabbing, close device, destroy handle)
        when the window closes; exit immediately on any SDK failure.

        NOTE(review): relies on the module-level `cam` global.
        """
        teardown = (
            (cam.MV_CC_StopGrabbing, "stop grabbing fail! ret[0x%x]"),
            (cam.MV_CC_CloseDevice, "close device fail! ret[0x%x]"),
            (cam.MV_CC_DestroyHandle, "destroy handle fail! ret[0x%x]"),
        )
        for step, failure_message in teardown:
            ret = step()
            if ret != 0:
                print(failure_message % ret)
                sys.exit()
        event.accept()
        print('Exit success')

    def keyPressEvent(self, event):
        """Keyboard shortcuts: M toggles target colour, N cycles the mode."""
        key = event.key()
        if key == Qt.Key_M:
            self.save_pic.toggle_color_mode()
        elif key == Qt.Key_N:
            self.save_pic.toggle_mode()


class Getimage_save(threading.Thread):
    """Camera worker thread.

    Soft-triggers the Hikvision camera, converts each frame to RGB, detects a
    pair of similar vertical light bars of the selected colour, draws the
    combined target box, derives an aiming offset from the image centre, shows
    both views in the GUI and saves the frames to disk.

    Runtime state (toggled from the UI via Mainwindow.keyPressEvent):
      mode:       0 = detect/aim, 1 = sweep search, 2 = idle.
      color_mode: 0 = blue targets, 1 = red targets.

    NOTE(review): run() relies on the module-level `cam` and `main` globals.
    """

    # Hikvision SDK pixel-format codes (values from the SDK PixelType header).
    _PIX_MONO8 = 17301505
    _PIX_BAYER_BG8 = 17301513
    _PIX_BAYER_RG8 = 17301514
    _PIX_RGB8 = 35127316

    def __init__(self):
        super(Getimage_save, self).__init__()
        self.t = 0           # phase parameter driving the sweep motion in mode 1
        self.color_mode = 0  # 0 = blue, 1 = red
        self.mode = 0        # 0 = detect, 1 = search, 2 = idle

    def toggle_color_mode(self):
        """Flip the target colour between blue (0) and red (1)."""
        self.color_mode = (self.color_mode + 1) % 2

    def toggle_mode(self):
        """Cycle the operating mode: detect (0) -> search (1) -> idle (2)."""
        self.mode = (self.mode + 1) % 3

    def calculate_angle(self, pt1, pt2, pt0):
        """Return the signed angle in degrees between rays pt0->pt1 and pt0->pt2."""
        dx1 = pt1[0] - pt0[0]
        dy1 = pt1[1] - pt0[1]
        dx2 = pt2[0] - pt0[0]
        dy2 = pt2[1] - pt0[1]
        return np.degrees(np.arctan2(dy1, dx1) - np.arctan2(dy2, dx2))

    def rotate_image(self, image, angle):
        """Rotate *image* by *angle* degrees about its centre.

        The output keeps the original width/height, so corners may be clipped.
        """
        (h, w) = image.shape[:2]
        M = cv2.getRotationMatrix2D((w / 2, h / 2), angle, 1.0)
        return cv2.warpAffine(image, M, (w, h))

    def _frame_to_rgb(self, stOutFrame, libc):
        """Copy the SDK-owned frame buffer and convert it to an RGB ndarray.

        Returns None for unsupported pixel formats; the caller remains
        responsible for freeing the SDK buffer in every case.
        """
        info = stOutFrame.stFrameInfo
        if info.enPixelType in (self._PIX_MONO8, self._PIX_BAYER_RG8, self._PIX_BAYER_BG8):
            buf_size = info.nWidth * info.nHeight
        elif info.enPixelType == self._PIX_RGB8:
            buf_size = info.nWidth * info.nHeight * 3
        else:
            print("Unsupported pixel format: %d" % info.enPixelType)
            return None

        # memcpy out of the driver buffer before it is returned to the SDK.
        pData = (c_ubyte * buf_size)()
        libc.memcpy(byref(pData), stOutFrame.pBufAddr, buf_size)
        data = np.frombuffer(pData, dtype=np.uint8)

        if info.enPixelType == self._PIX_MONO8:
            mono = data.reshape((info.nHeight, info.nWidth))
            return cv2.cvtColor(mono, cv2.COLOR_GRAY2BGR)
        if info.enPixelType in (self._PIX_BAYER_RG8, self._PIX_BAYER_BG8):
            bayer = data.reshape((info.nHeight, info.nWidth))
            code = (cv2.COLOR_BAYER_RG2RGB if info.enPixelType == self._PIX_BAYER_RG8
                    else cv2.COLOR_BAYER_BG2RGB)
            return cv2.cvtColor(bayer, code)
        rgb = data.reshape((info.nHeight, info.nWidth, 3))
        return cv2.cvtColor(rgb, cv2.COLOR_BGR2RGB)

    def run(self) -> None:
        """Main acquisition/processing loop (runs until the process exits)."""
        # Platform-specific C runtime providing memcpy.
        if platform.system() == "Linux":
            libc = ctypes.CDLL("libc.so.6")
        elif platform.system() == "Windows":
            libc = ctypes.CDLL("msvcrt.dll")
        else:
            raise OSError("Unsupported operating system")

        stOutFrame = MV_FRAME_OUT()
        memset(byref(stOutFrame), 0, sizeof(stOutFrame))

        while True:
            if self.mode == 0:  # detect/aim mode
                ret = cam.MV_CC_SetCommandValue("TriggerSoftware")
                if ret != 0:
                    print("Software trigger fail! ret[0x%x]" % ret)
                    sys.exit()

                ret = cam.MV_CC_GetImageBuffer(stOutFrame, 10000000)
                if stOutFrame.pBufAddr and ret == 0:
                    image_rgb = self._frame_to_rgb(stOutFrame, libc)
                    if image_rgb is None:
                        # BUGFIX: the unsupported-format path previously
                        # continued without freeing the SDK buffer (leak).
                        cam.MV_CC_FreeImageBuffer(stOutFrame)
                        continue

                    # BUGFIX: compute the timestamp and create the dated output
                    # directory up front.  The original assigned `now` only at
                    # the end of the loop, so the cropped-image save referenced
                    # `now` before assignment on the first frame and could
                    # target a directory that did not exist yet.
                    now = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
                    out_dir = f'C:/Packing_pic/{now[0:10]}'
                    os.makedirs(out_dir, exist_ok=True)

                    # Rotate so the scene is upright.
                    image_rgb = self.rotate_image(image_rgb, 270)

                    # HSV range for the currently selected target colour.
                    if self.color_mode == 0:
                        lower_color = np.array([50, 30, 30])    # blue
                        upper_color = np.array([130, 255, 255])
                    else:
                        lower_color = np.array([0, 43, 46])     # red
                        upper_color = np.array([10, 255, 255])

                    hsv = cv2.cvtColor(image_rgb, cv2.COLOR_RGB2HSV)
                    mask = cv2.inRange(hsv, lower_color, upper_color)
                    filtered_img = cv2.bitwise_and(image_rgb, image_rgb, mask=mask)

                    # Threshold the colour-filtered image and blur to merge blobs.
                    gray = cv2.cvtColor(filtered_img, cv2.COLOR_RGB2GRAY)
                    _, binary = cv2.threshold(gray, 20, 100, 0)
                    Gaussian = cv2.GaussianBlur(binary, (5, 5), 0)

                    draw_img = Gaussian.copy()
                    whole_h = binary.shape[0]

                    # BUGFIX: cv2.findContours returns 3 values in OpenCV 3 but
                    # 2 in OpenCV 4 -- index from the end so both versions work.
                    found = cv2.findContours(image=draw_img, mode=cv2.RETR_TREE,
                                             method=cv2.CHAIN_APPROX_NONE)
                    contours = sorted(found[-2], key=cv2.contourArea, reverse=True)

                    width_array = []
                    height_array = []
                    point_array = []

                    # Keep (among the 5 largest) contours shaped like vertical bars.
                    for cont in contours[:5]:
                        x, y, w, h = cv2.boundingRect(cont)
                        if w == 0:
                            # BUGFIX: explicit div-by-zero guard instead of the
                            # original bare `except: continue`.
                            continue
                        if h / w >= 0.35 and h / whole_h > 0.035 and h > w:
                            width_array.append(w)
                            height_array.append(h)
                            point_array.append([x, y])

                    point_near = [0, 0]
                    if len(width_array) >= 2:
                        # Pair the two bars whose areas are the most similar.
                        min_diff = 10000
                        for i in range(len(width_array) - 1):
                            for j in range(i + 1, len(width_array)):
                                value = abs(width_array[i] * height_array[i]
                                            - width_array[j] * height_array[j])
                                if value < min_diff:
                                    min_diff = value
                                    point_near[0] = i
                                    point_near[1] = j

                        try:
                            rectangle1 = point_array[point_near[0]]
                            rectangle2 = point_array[point_near[1]]
                            combined_width = (abs(rectangle1[0] - rectangle2[0])
                                              + min(width_array[point_near[0]],
                                                    width_array[point_near[1]]))
                            combined_height = (abs(rectangle1[1] - rectangle2[1])
                                               + max(height_array[point_near[0]],
                                                     height_array[point_near[1]]))

                            # Expand the box vertically to cover the plate
                            # between the two light bars.
                            expand_ratio = 0.65
                            expand_height = int(combined_height * expand_ratio)
                            rectangle1[1] -= expand_height // 2
                            rectangle2[1] -= expand_height // 2
                            height_array[point_near[0]] += expand_height
                            height_array[point_near[1]] += expand_height

                            print("长度：" + str(combined_width))
                            print(combined_height / combined_width)
                            # Aspect-ratio / size sanity check on the combined box.
                            if 0.1 < combined_height / combined_width < 0.8 and combined_width < 1500:
                                point1 = [int(rectangle1[0] + width_array[point_near[0]] / 2), int(rectangle1[1])]
                                point2 = [int(rectangle1[0] + width_array[point_near[0]] / 2),
                                          int(rectangle1[1] + height_array[point_near[0]])]
                                point3 = [int(rectangle2[0] + width_array[point_near[1]] / 2), int(rectangle2[1])]
                                point4 = [int(rectangle2[0] + width_array[point_near[1]] / 2),
                                          int(rectangle2[1] + height_array[point_near[1]])]
                                print(point1, point2, point3, point4)
                                box = np.array([point1, point2, point4, point3],
                                               np.int32).reshape((-1, 1, 2))
                                cv2.polylines(image_rgb, [box], True, (0, 255, 0), 2)
                                # Diagonals drawn as a visual cross on the target.
                                cv2.line(image_rgb, tuple(point1), tuple(point4), (255, 0, 0), 2)
                                cv2.line(image_rgb, tuple(point2), tuple(point3), (255, 0, 0), 2)

                                # Crop the target region.  BUGFIX: clamp to the
                                # image so the upward expansion above cannot
                                # produce negative slice indices.
                                x1, y1 = min(point1[0], point3[0]), min(point1[1], point3[1])
                                x2, y2 = max(point2[0], point4[0]), max(point2[1], point4[1])
                                x1, y1 = max(x1, 0), max(y1, 0)
                                cropped_image = image_rgb[y1:y2, x1:x2]
                                if cropped_image.size:
                                    cv2.imwrite(f"{out_dir}/cropped_{now}.jpeg", cropped_image)

                                # Offset of the target centre from the image centre.
                                target_center_x = (point1[0] + point3[0]) // 2
                                target_center_y = (point1[1] + point3[1]) // 2
                                img_center_x = image_rgb.shape[1] // 2
                                img_center_y = image_rgb.shape[0] // 2
                                delta_x = target_center_x - img_center_x
                                delta_y = target_center_y - img_center_y

                                # Gain factor turning pixel error into a move step.
                                move_x = int(delta_x * 0.1)
                                move_y = int(delta_y * 0.1)
                                print(f"Moving: ({move_x}, {move_y})")

                                # math.sin(0) == 0, so the else branch (negative
                                # gains) always runs; kept as in the original.
                                if int(math.sin(0) * 32767) < 0:
                                    move_x = int(math.sin(0) * 32767) + move_x * 300
                                    move_y = int(math.sin(0) * 8000) + move_y * 185
                                else:
                                    move_x = int(math.sin(0) * 32767) + move_x * -300
                                    move_y = int(math.sin(0) * 8000) + move_y * -185
                                # NOTE(review): the gcg_interface import at the
                                # top of the file is commented out, so this call
                                # raises NameError (caught below) until it is
                                # restored.
                                gcg_interface.move(move_x, move_y)
                                print("move:" + str(move_y))
                        except Exception as e:
                            print(f"Error processing rectangles: {e}")

                    # Show the annotated frame.  BUGFIX: pass the row stride
                    # explicitly -- QImage otherwise assumes 32-bit-aligned rows
                    # and shears images whose width*3 is not a multiple of 4.
                    q_image = QImage(image_rgb.data, image_rgb.shape[1], image_rgb.shape[0],
                                     image_rgb.shape[1] * 3, QImage.Format_RGB888)
                    pixmap = QPixmap.fromImage(q_image)
                    pixmap = pixmap.scaled(main.label1.width(), main.label1.height(),
                                           Qt.KeepAspectRatio)
                    main.label1.setPixmap(pixmap)
                    main.label1.update()

                    # Show the blurred binary image in the second label.
                    q_binary_image = QImage(Gaussian.data, Gaussian.shape[1], Gaussian.shape[0],
                                            Gaussian.shape[1], QImage.Format_Grayscale8)
                    pixmap_binary = QPixmap.fromImage(q_binary_image)
                    pixmap_binary = pixmap_binary.scaled(main.label2.width(), main.label2.height(),
                                                         Qt.KeepAspectRatio)
                    main.label2.setPixmap(pixmap_binary)
                    main.label2.update()

                    # Save the full annotated frame.
                    cv2.imwrite(f"{out_dir}/{now}.jpeg", image_rgb)

                    cam.MV_CC_FreeImageBuffer(stOutFrame)

            elif self.mode == 1:  # search mode: sweep the mount back and forth
                move_x = int(math.sin(self.t) * 32767)
                move_y = int(math.sin(self.t) * 8000)
                # gcg_interface.move(move_x, move_y)
                self.t += 0.1  # advance the sweep phase
                time.sleep(0.1)

            elif self.mode == 2:  # idle mode: do nothing
                # gcg_interface.move(0, 0)
                time.sleep(0.1)


if __name__ == '__main__':

    # Worker thread; started only after the camera is configured and grabbing.
    save_pic = Getimage_save()

    app = QApplication(sys.argv)
    main = Mainwindow(save_pic)
    main.show()

    # Enumerate all GigE and USB cameras.
    deviceList = MV_CC_DEVICE_INFO_LIST()
    tlayerType = MV_GIGE_DEVICE | MV_USB_DEVICE
    ret = MvCamera.MV_CC_EnumDevices(tlayerType, deviceList)
    if ret != 0:
        # BUGFIX: this return code was previously ignored while every other
        # SDK call in this script is checked.
        print("enum devices fail! ret[0x%x]" % ret)
        sys.exit()

    print("Find %d devices!" % deviceList.nDeviceNum)

    # Build a serial-number -> enumeration-index map so the camera can be
    # opened by its serial number rather than by list position.
    cam_dict = {}
    for i in range(0, deviceList.nDeviceNum):
        mvcc_dev_info = cast(deviceList.pDeviceInfo[i], POINTER(MV_CC_DEVICE_INFO)).contents
        strSerialNumber = ""
        if mvcc_dev_info.nTLayerType == MV_USB_DEVICE:
            # chSerialNumber is a fixed-size, NUL-terminated byte array.
            for per in mvcc_dev_info.SpecialInfo.stUsb3VInfo.chSerialNumber:
                if per == 0:
                    break
                strSerialNumber = strSerialNumber + chr(per)
            print("USB device serial number: %s" % strSerialNumber)
        elif mvcc_dev_info.nTLayerType == MV_GIGE_DEVICE:
            for per in mvcc_dev_info.SpecialInfo.stGigEInfo.chSerialNumber:
                if per == 0:
                    break
                strSerialNumber = strSerialNumber + chr(per)
            print("GigE device serial number: %s" % strSerialNumber)
        cam_dict[strSerialNumber] = i
    print("Device dictionary: ", cam_dict)

    # Serial number of the camera this script should open.
    cam_sn = ['DA3217436']
    if cam_sn[0] not in cam_dict:
        print(f"Serial number {cam_sn[0]} not found in the device list.")
        sys.exit()

    cam = MvCamera()
    stDeviceList = cast(deviceList.pDeviceInfo[cam_dict[cam_sn[0]]], POINTER(MV_CC_DEVICE_INFO)).contents
    ret = cam.MV_CC_CreateHandle(stDeviceList)
    if ret != 0:
        print("create handle fail! ret[0x%x]" % ret)
        sys.exit()

    ret = cam.MV_CC_OpenDevice(MV_ACCESS_Exclusive, 0)
    if ret != 0:
        print("open device fail! ret[0x%x]" % ret)
        sys.exit()

    # Software-trigger mode: the worker thread fires "TriggerSoftware" once
    # per frame instead of the camera free-running.
    ret = cam.MV_CC_SetEnumValue("TriggerMode", MV_TRIGGER_MODE_ON)
    if ret != 0:
        print("Set trigger mode on fail! ret[0x%x]" % ret)
        sys.exit()

    ret = cam.MV_CC_SetEnumValue("TriggerSource", MV_TRIGGER_SOURCE_SOFTWARE)
    if ret != 0:
        print("Set trigger source to software fail! ret[0x%x]" % ret)
        sys.exit()

    # Fixed exposure to control brightness (units per the camera's
    # ExposureTime feature -- typically microseconds; adjust as needed).
    exposure_time = 5000.0
    ret = cam.MV_CC_SetFloatValue("ExposureTime", exposure_time)
    if ret != 0:
        print("Set exposure time fail! ret[0x%x]" % ret)
        sys.exit()

    # Read back the value the camera actually applied.
    stFloatValue = MVCC_FLOATVALUE()
    memset(byref(stFloatValue), 0, sizeof(MVCC_FLOATVALUE))
    ret = cam.MV_CC_GetFloatValue("ExposureTime", stFloatValue)
    if ret != 0:
        print("Failed to get exposure time, ret[0x%x]" % ret)
    else:
        print('Exposure_time:', stFloatValue.fCurValue)

    ret = cam.MV_CC_StartGrabbing()
    if ret != 0:
        print("Start grabbing fail! ret[0x%x]" % ret)
        sys.exit()

    save_pic.daemon = True  # don't let this thread block interpreter exit
    save_pic.start()

    sys.exit(app.exec_())
