"""在本地电脑运行"""
import socket
import cv2
import numpy as np
import typing


class ReceiveImg(object):
    """Receives an MJPEG stream from the Raspberry Pi over TCP and decodes
    one JPEG frame at a time."""
    def __init__(self, host, port):
        """Connect to the streaming server.
        * host: IP address of the Raspberry Pi
        * port: port number, must match the one configured on the Pi"""
        self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)		# TCP client socket
        self.client_socket.connect((host, port))
        self.connection = self.client_socket.makefile('rb')							# file-like wrapper for reading the raw byte stream
        # Rolling buffer of received bytes; complete JPEG frames are carved
        # out of it in receive().
        self.stream_bytes = b''

        print(" ")
        print("已连接到服务端：")
        print("Host : ", host)
        print("请按‘q’退出图像传输!")

    def receive(self) -> cv2.typing.MatLike|None:
        """Read more bytes from the stream and, once a complete JPEG frame is
        buffered, decode and return it.

        Returns: a decoded BGR image, or None when no full frame is ready yet,
        the stream ended, or the connection failed."""
        try:
            msg = self.connection.read(1024)					# read up to 1024 bytes per call
            if not msg:											# EOF: server closed the stream
                return None
            self.stream_bytes += msg
            first = self.stream_bytes.find(b'\xff\xd8')			# JPEG SOI (start-of-image) marker
            last = self.stream_bytes.find(b'\xff\xd9')			# JPEG EOI (end-of-image) marker

            if first != -1 and last != -1:
                # BUG FIX: a stale EOI left over from a partial frame can
                # precede the next SOI; the original sliced [first:last+2]
                # anyway and fed a garbage slice to imdecode. Drop the stale
                # prefix and wait for the frame to complete instead.
                if last < first:
                    self.stream_bytes = self.stream_bytes[first:]
                    return None
                jpg = self.stream_bytes[first:last + 2]			# bytes between SOI and EOI = one encoded frame
                self.stream_bytes = self.stream_bytes[last + 2:]	# keep the remainder for the next frame
                image = cv2.imdecode(np.frombuffer(jpg, dtype=np.uint8), cv2.IMREAD_COLOR)	# decode the JPEG bytes into a BGR image
                return image

        except OSError:
            # Narrowed from a bare `except:`; only socket/stream failures are
            # expected here — anything else should surface.
            print("Error：连接出错！")
        return None

class ColorDetector(object):
    """HSV colour detector with live OpenCV-trackbar threshold tuning."""
    def __init__(self) -> None:
        """Initialise with wide-open thresholds (everything passes)."""
        # region initial HSV thresholds
        self.low_h = 0
        self.low_s = 0
        self.low_v = 0

        self.high_h = 180       # OpenCV hue range is 0-180
        self.high_s = 255
        self.high_v = 255

        # contour bounding-box area filter bounds (pixels)
        self.minarea = 0
        self.maxarea = 100000 # 220800
        # endregion

    def __call__(self,id):
        """Create a trackbar window named 'trackbar{id}' for live tuning.
        * id: suffix that keeps several detectors' windows apart"""
        # region create trackbars
        cv2.namedWindow(f'trackbar{id}', cv2.WINDOW_NORMAL)
        cv2.createTrackbar('low_h', f'trackbar{id}', self.low_h, 180, self.call_back_low_h)
        cv2.createTrackbar('high_h', f'trackbar{id}', self.high_h, 180, self.call_back_high_h)
        cv2.createTrackbar('low_s', f'trackbar{id}', self.low_s, 255, self.call_back_low_s)
        cv2.createTrackbar('high_s', f'trackbar{id}', self.high_s, 255, self.call_back_high_s)
        cv2.createTrackbar('low_v', f'trackbar{id}', self.low_v, 255, self.call_back_low_v)
        cv2.createTrackbar('high_v', f'trackbar{id}', self.high_v, 255, self.call_back_high_v)

        cv2.createTrackbar('minarea', f'trackbar{id}', self.minarea, 100000, self.call_back_minarea)
        cv2.createTrackbar('maxarea', f'trackbar{id}', self.maxarea, 100000, self.call_back_maxarea)
        # endregion

    # region trackbar callbacks — each stores the new slider value
    def callback(self, x):
        pass

    def call_back_low_h(self, low_h):
        self.low_h = low_h

    def call_back_high_h(self, high_h):
        self.high_h = high_h

    def call_back_low_s(self, low_s):
        self.low_s = low_s

    def call_back_high_s(self, high_s):
        self.high_s = high_s

    def call_back_low_v(self, low_v):
        self.low_v = low_v

    def call_back_high_v(self, high_v):
        self.high_v = high_v

    def call_back_minarea(self, minarea):
        self.minarea = minarea

    def call_back_maxarea(self, maxarea):
        self.maxarea = maxarea
    # endregion

    def detect(self, img:cv2.typing.MatLike) -> cv2.typing.MatLike:
        """Threshold *img* in HSV space.
        * img: input BGR image

        Returns: binary mask — 255 where the pixel is inside the HSV
        threshold range, 0 elsewhere."""
        _shape = img.shape
        try:
            hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)			# convert BGR -> HSV
        except cv2.error:
            # Narrowed from a bare `except:`: conversion can fail on
            # empty/degenerate ROIs — return an all-black mask of the same
            # spatial size instead of crashing the main loop.
            return np.zeros((_shape[0], _shape[1]), np.uint8)

        low = np.array([self.low_h, self.low_s, self.low_v])	# lower HSV bound
        high = np.array([self.high_h, self.high_s, self.high_v])	# upper HSV bound

        mask = cv2.inRange(hsv, low, high)						# 255 inside range, 0 outside
        kernel = np.ones((5, 5), np.uint8)						# 5x5 structuring element
        opencal = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)	# opening (erode then dilate) removes speckle noise
        # NOTE(review): a second opening with the same kernel is essentially a
        # no-op (opening is idempotent) — MORPH_CLOSE may have been intended.
        # Kept as-is to preserve behaviour; TODO confirm.
        opencal = cv2.morphologyEx(opencal, cv2.MORPH_OPEN, kernel)
        return opencal

    def draw_rectangle(self, img:cv2.typing.MatLike, 
                       contours:typing.Sequence[cv2.typing.MatLike]) -> tuple[cv2.typing.MatLike, list[tuple[int, int, int, int]]]: 
        """Draw bounding boxes for the accepted contours onto *img* (in place).
        * img: image to draw on (mutated)
        * contours: contour list from cv2.findContours()

        Returns: (the same image with boxes drawn, list of (x1, y1, x2, y2) boxes)"""
        lst = []
        for cnt in contours:
            x, y, w, h = cv2.boundingRect(cnt)					# axis-aligned bounding box

            if (self.minarea > w*h or w*h > self.maxarea):		# reject boxes outside the area window
                continue
            if h/w > 1.5:										# reject tall boxes — presumably filters streak reflections; TODO confirm
                continue
            cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)	# draw a green box on the image
            lst.append((x, y, x + w, y + h))
        return img, lst
    
    def get_edge(self, img:cv2.typing.MatLike) -> typing.Sequence[cv2.typing.MatLike]:
        """Find contours in a binary image.
        * img: binary mask
        Returns: the contour list (hierarchy is discarded)."""
        contours, _ = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        return contours
    
    def expend_point(self, img:cv2.typing.MatLike, point:tuple[int, int], _expand:int=10) -> cv2.typing.MatLike:
        """Crop the neighbourhood around a detected laser point, clamped to
        the image borders.
        ------
        * img: source image
        * point: (x, y) centre of the crop
        * _expand: half-size of the crop in pixels
        Returns: the cropped region of *img* (smaller near the borders)."""
        _shape = img.shape
        # region clamp the crop window to the image
        # BUG FIX: the original computed y1 = point[1] + _expand and
        # y2 = point[1] - _expand (signs swapped), so y1 >= y2 and the slice
        # below was always empty. Clamp each edge with the correct sign.
        x1 = max(int(point[0]) - _expand, 0)
        x2 = min(int(point[0]) + _expand, int(_shape[1]))
        y1 = max(int(point[1]) - _expand, 0)
        y2 = min(int(point[1]) + _expand, int(_shape[0]))
        # endregion

        nw = (x1, y1)       # top-left corner (x, y)
        se = (x2, y2)       # bottom-right corner (x, y)
        # numpy indexes rows (y) first, then columns (x)
        return img[nw[1]:se[1], nw[0]:se[0]]

    def __del__(self):
        """Destructor: close any windows this detector opened."""
        cv2.destroyAllWindows()


if __name__ == '__main__':
    # Receiver pulling the MJPEG stream from the Raspberry Pi.
    # NOTE(review): variable name has a typo ("reveiver") — kept as-is.
    reveiver = ReceiveImg('192.168.137.112', 8000)
    light_detector = ColorDetector()        # bright-spot detector (trackbar window id=1)
    light_detector.low_v = 246              # only very bright pixels pass the V threshold
    light_detector.minarea = 680
    light_detector(1)

    laser_detector = ColorDetector()        # laser-colour detector (trackbar window id=2)
    laser_detector.low_h = 113              # hue lower bound — presumably tuned for this laser's colour; TODO confirm
    laser_detector.minarea = 600
    laser_detector(2)

    # cv2.namedWindow('laser', cv2.WINDOW_NORMAL)
    #TODO: awaiting on-hardware testing
    while True:
        img = reveiver.receive()
        if img is None:
            continue
        # Rotate the frame 180 degrees (original comment said "horizontal
        # flip", but cv2.ROTATE_180 flips both axes).
        img = cv2.rotate(img, cv2.ROTATE_180)
        # Gaussian blur to suppress sensor noise before thresholding.
        img = cv2.GaussianBlur(img, (5, 5), 0)
        # cv2.imshow('ori', img)

        # region detect bright laser spots

        light_img = light_detector.detect(img)
        cv2.imshow('light', light_img)

        contours = light_detector.get_edge(light_img)
        # NOTE(review): draw_rectangle draws on `img` in place, so `img1` is
        # the same array and the ROI slices below include the green boxes.
        img1, points = light_detector.draw_rectangle(img, contours)
        cv2.imshow('light rectangle', img1)
        # endregion 
        # `points` now holds ROI corner coordinates (x1, y1, x2, y2)

        # region classify the laser colour inside each ROI (at most 2 ROIs)
        if len(points) <= 2:
            for index, i in enumerate(points):
                img2 = img[i[1]:i[3], i[0]:i[2]]            # extract the ROI
                cv2.imshow(f'roi{index}', img2)

                mask = laser_detector.detect(img2)          # colour-threshold the ROI
                cv2.namedWindow(f'mask{index}', cv2.WINDOW_NORMAL)
                cv2.imshow(f'mask{index}', mask)

                contours = laser_detector.get_edge(mask)        # contours inside the ROI
                img3, point = laser_detector.draw_rectangle(img2, contours)     # draw boxes on the ROI

                if len(point)!=0:       # laser colour matched inside this ROI
                    location = (int((i[0]+i[2])/2), int((i[1]+i[3])/2))    # centre of the ROI = laser position in the full frame
                    cv2.circle(img, location, 10, (0, 0, 255), -1)
                    cv2.imshow('laser', img)
                    print(location)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break