# @Time : 2024/10/7 12:23
# @Author : Fioman 
# @Phone : 13149920693
"""
2拼正常竖直排版的算法
"""
import imutils

from common.data_result import DataResult
from common.enum_model import RotatedType
from tools.fioman_math_tools import *
from tools.line_camera_params import LineCameraParams

# Shared line-scan camera parameters, loaded once at import time.
cp = LineCameraParams()
# BGR color constants used when drawing debug overlays with OpenCV.
color_blue = (255, 0, 0)
color_green = (0, 255, 0)
color_red = (0, 0, 255)
color_list = [color_blue, color_green, color_red]


def image_show(name, image, show_state=False):
    """
    Display an image for debugging, scaled down by a fixed factor.

    @param name: window title to show the image under
    @param image: image to display
    @param show_state: debug switch - the image is only shown when True
    @return: None
    """
    if show_state:
        size_scale = 5
        new_width = int(image.shape[1] / size_scale)
        new_height = int(image.shape[0] / size_scale)
        # BUG FIX: the third positional argument of cv.resize is `dst`,
        # not the interpolation flag, so passing cv.INTER_AREA positionally
        # raises a TypeError. Pass it by keyword instead.
        show_image = cv.resize(image, (new_width, new_height),
                               interpolation=cv.INTER_AREA)
        cv.namedWindow(name)
        cv.imshow(name, show_image)
        # Block until a key is pressed so the window stays visible.
        cv.waitKey(0)


def find_total_board(image, rotate_type):
    """
    Locate the material (board) region and return its thresholded mask and
    min-area bounding box.

    @param image: source image; cv.threshold is applied to it directly
                  (assumes single-channel grayscale -- TODO confirm, since
                  cv.COLOR_BGR2RGB below implies 3 channels)
    @param rotate_type: image rotation mode - clockwise, counter-clockwise
                        or no rotation
    @return: DataResult; on success data holds "box" (rotated rect),
             "total_board_thres" (binary mask) and "image_res" (annotated
             image); state is False when no contour is found
    """
    result = DataResult()
    # Sample a 300-pixel background strip (position depends on rotation)
    # to derive an adaptive binarization threshold.
    if rotate_type == RotatedType.NoRotated:
        thres_used = image[:300, :]
    elif rotate_type == RotatedType.ClockWise:
        thres_used = image[:, image.shape[1] - 300:image.shape[1]]
    else:
        thres_used = image[:, :300]
    image_show("totalBoardThresUsed", thres_used)
    thres_used_val = int(np.mean(thres_used) + 18)
    print(f"物料区域查找使用的阈值: {thres_used_val}")
    filter_area = 5000  # minimum contour area kept as a board candidate
    _, board = cv.threshold(image, thres_used_val, 255, cv.THRESH_BINARY)
    image_show("BoardThresOriginal", board)
    # Morphological opening removes small speckles.
    kernel = cv.getStructuringElement(cv.MORPH_RECT, (3, 3))
    board = cv.morphologyEx(board, cv.MORPH_OPEN, kernel, iterations=5)
    image_show("BoardOpened", board)

    cnts = cv.findContours(board.copy(), cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    # Keep only sufficiently large contours.
    large_contours = [cnt for cnt in cnts if cv.contourArea(cnt) > filter_area]
    if len(large_contours) >= 2:
        # Multiple pieces: bridge them with thick lines between their
        # centers so they merge into one contour on re-detection.
        center_points = [get_center_by_contour(cnt) for cnt in large_contours]
        for index, center in enumerate(center_points):
            another_center = center_points[(index + 1) % len(center_points)]
            cv.line(board, center, another_center, 255, 20)
        image_show("JointCenterLine", board)
        # Re-detect contours after joining.
        cnts = cv.findContours(board.copy(), cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        cnt_first_finded = sorted(cnts, key=cv.contourArea, reverse=True)[0]
    elif len(large_contours) == 1:
        cnt_first_finded = large_contours[0]
    else:
        result.state = False
        result.data = {}
        result.info = "图像错误,未找到任何轮廓"
        return result

    # With the contour found, blank out the four corner regions.
    # BUG FIX: cv.boxPoints expects a rotated rect ((cx, cy), (w, h), angle),
    # not a raw contour -- wrap the contour in cv.minAreaRect first (the same
    # pattern used further below and in check_total_board_is_ok).
    rect_points = np.intp(cv.boxPoints(cv.minAreaRect(cnt_first_finded)))
    # NOTE(review): unpack order (lt, rt, rb, lb) differs from the
    # (lt, rt, lb, rb) order used in check_total_board_is_ok -- confirm
    # get_rect_points_clockwise's actual return order.
    left_top, right_top, right_bottom, left_bottom = get_rect_points_clockwise(rect_points)
    total_width = distance(left_top, right_top)
    total_height = distance(left_top, left_bottom)
    board_final_mask = np.zeros_like(board)
    board_final = cv.drawContours(board_final_mask, [cnt_first_finded], -1, 255, -1)
    image_show("BoardFinal", board_final)
    move_width = int(total_width * 0.01)
    move_height = int(total_height * 0.1)
    extend = 20
    # NOTE(review): if a corner lies within `extend` pixels of the image
    # border, the slice start goes negative and wraps to the far side --
    # confirm corners are always far enough from the edges.
    # Top-left corner
    board_final[left_top[1] - extend:left_top[1] + move_height,
    left_top[0] - extend:left_top[0] + move_width] = 0
    # Top-right corner
    board_final[right_top[1] - extend:right_top[1] + move_height,
    right_top[0] - move_width:right_top[0] + extend] = 0
    # Bottom-left corner
    board_final[left_bottom[1] - move_height:left_bottom[1] + extend,
    left_bottom[0] - extend:left_bottom[0] + move_width] = 0
    # Bottom-right corner
    board_final[right_bottom[1] - move_height:right_bottom[1] + extend,
    right_bottom[0] - move_width:right_bottom[0] + extend] = 0
    image_show("FinalBoardAngleMoved", board_final)

    # After removing the corner influence, find the contour one more time.
    cnts = cv.findContours(board_final.copy(), cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    cnt_final_finded = sorted(cnts, key=cv.contourArea, reverse=True)[0]
    total_board_thres = board_final.copy()
    image_show("TotalBoardThres", total_board_thres)
    box = cv.minAreaRect(cnt_final_finded)
    (cx, cy), (w, h), angle = box
    # Normalize near-vertical boxes: swap width/height and fold the angle
    # back toward zero so downstream code sees a consistent orientation.
    if angle > 70:
        angle = 90 - angle
        box = (cx, cy), (h, w), angle
    elif angle < -70:
        angle = 90 + angle
        box = (cx, cy), (h, w), angle
    rect_points = np.intp(cv.boxPoints(box))
    image_color = cv.cvtColor(image, cv.COLOR_BGR2RGB)
    # NOTE(review): box_mask is drawn but never used or returned.
    box_mask = np.zeros_like(image)
    cv.drawContours(box_mask, [rect_points], -1, 255, -1)
    cv.drawContours(image_color, [rect_points], -1, color_green, 2)
    image_show("TotalBoardFinded", image_color)
    result.data = {
        "box": box,
        "total_board_thres": total_board_thres,
        "image_res": image_color,
    }
    return result


def check_total_board_is_ok(image, board_break, joint_number, rotated_type):
    """
    检测物料区域是否OK,主要是用来判断来料是否正常,如果不正常,需要提供人工进行操作
    @param image:
    @param board_break:
    @param joint_number:
    @param rotated_type:图像的旋转方式
    @return:
    """
    roi_h = 120
    h, w = image.shape[:2]
    if rotated_type == RotatedType.NoRotated:
        table_ystart, table_yend = cp.sensor_pos - roi_h, cp.sensor_pos
        table_xstart, table_xend = cp.table_pos_start, cp.table_pos_end
        sensor_ystart, sensor_yend = cp.sensor_pos - roi_h, cp.sensor_pos
        sensor_xstart, sensor_xend = cp.table_pos_start, cp.table_pos_end
    elif rotated_type == RotatedType.ClockWise:
        table_ystart, table_yend = cp.table_pos_start, cp.table_pos_end
        table_xstart, table_xend = w - cp.sensor_pos, w - cp.sensor_pos + roi_h
        sensor_ystart, sensor_yend = cp.table_pos_start, cp.table_pos_end
        sensor_xstart, sensor_xend = w - cp.sensor_pos - roi_h, w - cp.sensor_pos
    else:
        table_ystart, table_yend = h - cp.table_pos_end, h - cp.table_pos_start
        table_xstart, table_xend = cp.sensor_pos - roi_h, cp.sensor_pos
        sensor_ystart, sensor_yend = h - cp.table_pos_end, h - cp.table_pos_start
        sensor_xstart, sensor_xend = cp.sensor_pos, cp.sensor_pos + roi_h
    table_roi = image[table_ystart:table_yend, table_xstart:table_xend]
    sensor_roi = image[sensor_ystart:sensor_yend, sensor_xstart:sensor_xend]
    table_val = int(np.mean(table_roi))
    sensor_val = int(np.mean(sensor_roi))
    result = find_total_board(image, rotated_type)
    if not result.state:
        return result
    box = result.data["box"]
    totalboard_thres = result.data["total_board_thres"]
    image_res = result.data["image_res"]
    # 获取传感器偏移距离
    rect_points = np.intp(cv.boxPoints(box))
    left_top, right_top, left_bottom, right_bottom = get_rect_points_clockwise(rect_points)
    if rotated_type == RotatedType.NoRotated:
        sensor_point = ((cp.table_pos_start + cp.table_pos_end) // 2, cp.sensor_pos)
        trigger_line_k, trigger_line_b = get_line_k_and_b(left_top, right_top)
    elif rotated_type == RotatedType.ClockWise:
        sensor_point = (w - cp.sensor_pos, (cp.table_pos_start + cp.table_pos_end) // 2)
        trigger_line_k, trigger_line_b = get_line_k_and_b(right_top, right_bottom)
    else:
        sensor_point = (cp.sensor_pos, (w - cp.table_pos_start + w - cp.table_pos_end) // 2)
        trigger_line_k, trigger_line_b = get_line_k_and_b(left_top, left_bottom)
    sensor_trigger_dis = get_point_to_line_distance(sensor_point, trigger_line_k, trigger_line_b)
    if not board_break:
        max_thres = cp.height_max
    else:
        max_thres = cp.height_max + 200

    image_color = image_res.copy()
    table_points = [(table_xstart, table_ystart), (table_xend, table_ystart),
                    (table_xstart, table_yend), (table_xend, table_yend)]
    sensor_points = [(sensor_xstart, sensor_ystart), (sensor_xend, sensor_ystart),
                     (sensor_xstart, sensor_yend), (sensor_xend, sensor_yend)]
    cv.drawContours(image_color, [np.array(table_points)], -1, color_blue, 3)
    cv.drawContours(image_color, [np.array(sensor_points)], -1, color_green, 3)
    # 将台布区域和传感器区域以及它们的值,写到图像上去
    if rotated_type == RotatedType.NoRotated:
        table_center = get_two_points_middle(table_points[0], table_points[1], res_is_int=True)
        sensor_center = get_two_points_middle(sensor_points[2], sensor_points[3], res_is_int=True)
        table_val_pos = table_center[0], table_center[1] - 20
        sensor_val_pos = sensor_center[0], sensor_center[1] + 50
    elif rotated_type == RotatedType.ClockWise:
        table_center = get_two_points_middle(table_points[1], table_points[2], res_is_int=True)
        sensor_center = get_two_points_middle(sensor_points[0], sensor_points[3], res_is_int=True)
        table_val_pos = table_center[0] + 20, table_center[1]
        sensor_val_pos = sensor_center[0] - 50, sensor_center[1]
    else:
        table_center = get_two_points_middle(table_points[0], table_points[3], res_is_int=True)
        sensor_center = get_two_points_middle(sensor_points[1], sensor_points[2], res_is_int=True)
        table_val_pos = table_center[0] - 50, table_center[1]
        sensor_val_pos = sensor_center[0] + 20, sensor_center[1]

    cv.drawContours(image_color, [np.array(table_points)], -1, color_blue, 3)
    cv.drawContours(image_color, [np.array(sensor_points)], -1, color_green, 3)
    cv.putText(image_color, str(table_val), table_val_pos, cv.FONT_HERSHEY_COMPLEX,
               2, color_blue, 3)
    cv.putText(image_color, str(sensor_val), sensor_val_pos, cv.FONT_HERSHEY_COMPLEX,
               2, color_green, 3)
