import cv2
import numpy as np
from cv2 import aruco
from loguru import logger
import random
import traceback
from cali_util import calculate_target_position
import os

# Initialization parameters for ArUco marker detection
aruco_dict = aruco.getPredefinedDictionary(aruco.DICT_4X4_50)
parameters = aruco.DetectorParameters()
# Units-per-pixel scale factor; None until get_main_status() measures it from
# the origin marker's pixel size — code reading it earlier will fail.
ori_scale = None

# Fixed (x, y) offset added when mapping pixel coords to real-world coords
REAL_OFFSET = (98, -45)
# Truthy -> show debug windows via imshow()
VISION_DEBUG = 1


def imshow(name, img):
    """Show *img* in a named window, but only when VISION_DEBUG is enabled.

    Parameters:
        name: window title passed to cv2.imshow.
        img: image (numpy array) to display.
    """
    if VISION_DEBUG:
        cv2.imshow(name, img)
        # 1 ms event pump so the window actually refreshes
        cv2.waitKey(1)


def loopok():
    """Return True while the worker loop is enabled.

    The flag lives in the WORK_THREAD_LOOP environment variable ("1" means
    keep looping); a warning is logged whenever the flag is not set.
    """
    flag = os.environ.get("WORK_THREAD_LOOP")
    if flag != "1":
        logger.warning("WORK_THREAD_LOOP is not 1")
        return False
    return True


def setloopok(isok):
    """Persist the worker-loop flag to the environment ("1" on, "0" off)."""
    flag_value = "1" if isok else "0"
    os.environ["WORK_THREAD_LOOP"] = flag_value


def detect_marks(frame):
    """Detect the three expected ArUco markers in *frame*.

    Marker ids: 6 = origin marker, 4 = board top marker, 5 = board bottom
    marker.

    Returns:
        ({"center": origin_center, "detail": origin_corners},
         board_mark_up_center, board_mark_down_center)

    Raises:
        ValueError: when no markers were detected, or any of the three
            required ids is missing — caller is expected to retry on the
            next frame.
    """
    markers, ids, _ = aruco.detectMarkers(frame, aruco_dict, parameters=parameters)

    ori_posi = None
    ori_posi_detailed = None
    board_mark_up = None
    board_mark_down = None

    # detectMarkers returns an empty corner sequence (and ids=None) when
    # nothing is found; the original `markers[0]` check raised IndexError on
    # an empty tuple, so test emptiness explicitly instead of indexing.
    if markers is None or len(markers) == 0 or ids is None:
        logger.warning("No marks, wait for next frame")
        raise ValueError("No marks, wait for next frame")

    for mark_index in range(len(markers)):
        # marker center = mean of its four corner points
        center_posi = np.mean(markers[mark_index][0], axis=0)
        mark_id = ids[mark_index][0]
        if mark_id == 6:
            ori_posi = center_posi
            ori_posi_detailed = markers[mark_index]
        elif mark_id == 4:
            board_mark_up = center_posi
        elif mark_id == 5:
            board_mark_down = center_posi

    if ori_posi is None or board_mark_up is None or board_mark_down is None:
        logger.warning("No marks, wait for next frame")
        raise ValueError("No marks, wait for next frame")

    return (
        {"center": ori_posi, "detail": ori_posi_detailed},
        board_mark_up,
        board_mark_down,
    )


def extend_rect(rect, delta=10):
    """Grow a rotated rect by *delta* pixels on every side.

    Parameters:
        rect: ((cx, cy), (w, h), angle) rotated-rect tuple.
        delta: margin added to each side (so width/height grow by 2*delta).

    Returns:
        A new rotated-rect tuple with the same center and angle.
    """
    (cx, cy), (w, h), theta = rect
    grown_size = (w + 2 * delta, h + 2 * delta)
    return ((cx, cy), grown_size, theta)


def calculate_aspect_ratio(rect):
    """Return the short-side / long-side ratio (<= 1.0) of a rotated rect."""
    _, (w, h), _ = rect
    short_side, long_side = sorted((w, h))
    return short_side / long_side


def build_grid_rects(center, size, angle):
    """Generate a 3x3 grid of rotated square rects centered on *center*.

    Parameters:
        center: (x, y) of the middle cell.
        size: side length of each cell; also the cell-to-cell pitch.
        angle: grid rotation in degrees.

    Returns:
        List of 9 dicts {"rect": ((x, y), (size, size), angle),
        "grid": (row, col), "type": 0}, row/col in 0..2.
    """
    # The rotation matrix depends only on `angle`; hoist it out of the loop
    # instead of rebuilding it for every one of the 9 cells.
    theta = np.radians(angle)
    rot_matrix = np.array(
        [[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]
    )

    rects = []
    rows, cols = 3, 3
    for i in range(rows):
        for j in range(cols):
            # cell offset relative to the grid center, before rotation
            offset_x = (j - 1) * size
            offset_y = (i - 1) * size
            dx, dy = rot_matrix @ np.array([offset_x, offset_y])

            rect = ((center[0] + dx, center[1] + dy), (size, size), angle)
            rects.append({"rect": rect, "grid": (i, j), "type": 0})
    return rects


def transform_rect(rect, M_inv):
    """Map a rotated rect through the affine transform *M_inv* and return the
    minimum-area rotated rect enclosing the transformed corners."""
    corners = cv2.boxPoints(rect)
    warped_corners = cv2.transform(corners[np.newaxis, ...], M_inv)[0]
    return cv2.minAreaRect(warped_corners)


def get_mask(rotated_rect, img_size):
    """Rasterize a rotated rect into a binary mask (255 inside, 0 outside).

    Parameters:
        rotated_rect: ((cx, cy), (w, h), angle) rect to fill.
        img_size: image shape tuple (height, width, channels).
    """
    height, width, _ = img_size

    mask = np.zeros((height, width), dtype=np.uint8)
    corners = np.int64(cv2.boxPoints(rotated_rect))
    cv2.fillPoly(mask, [corners], 255)
    return mask


def get_all_rect(center0, center1):
    """Build every board region from the two board fiducial centers.

    Parameters:
        center0: pixel center of board marker id 4 (top).
        center1: pixel center of board marker id 5 (bottom).

    Returns:
        List of dicts {"rect": rotated-rect, "grid": (row, col), "area": n}:
        9 grid cells (area 0), two store strips (area 1 and 2, grid keys
        (-1,-1)/(-2,-2)), and one "hole" region covering the play area
        (area 9, grid (-3,-3)).

    Geometry: rotate the frame so the marker axis is horizontal, lay the
    regions out axis-aligned there, then map every rect back with the
    inverse transform. Numeric offsets (3.75, +3, *2, 1.55, 10) are
    hand-tuned to this physical board.
    """
    dx = center1[0] - center0[0]
    dy = center1[1] - center0[1]
    angle_rad = np.arctan2(dy, dx)
    angle_deg = np.degrees(angle_rad)

    distance = np.sqrt(dx**2 + dy**2)
    # marker-to-marker distance spans 3.75 grid cells (hand-tuned)
    grid_size = distance / 3.75

    # Rotation pivot (300, 200) is a fixed point — presumably near the image
    # center for a 640x480-ish frame; TODO confirm against camera resolution.
    M = cv2.getRotationMatrix2D(tuple((300, 200)), angle_deg, 1.0)

    # apply M to center0/1
    center0m = tuple(np.dot(M, np.array([center0[0], center0[1], 1])))
    center1m = tuple(np.dot(M, np.array([center1[0], center1[1], 1])))

    # grid center = midpoint of the two rotated markers, plus hand-tuned
    # offsets (+3 px along x, two cell heights along y)
    grid_center = tuple(
        (
            (center0m[0] + center1m[0]) / 2 + 3,
            (center0m[1] + center1m[1]) / 2 + grid_size * 2,
        )
    )

    grid_rect = build_grid_rects(grid_center, grid_size, 0)

    # Two store strips flanking the grid, 3 cells long, ~0.65 cell wide
    store_rect = [
        (
            (grid_center[0] + grid_size * 2 + 10, grid_center[1]),
            (grid_size * 3, grid_size / 1.55),
            90,
        ),
        (
            (grid_center[0] - grid_size * 2 - 10, grid_center[1]),
            (grid_size * 3, grid_size / 1.55),
            90,
        ),
    ]

    # Map all rects back to the original (unrotated) frame
    M_inv = cv2.invertAffineTransform(M)
    ori_grid_rect = [
        {"rect": transform_rect(i["rect"], M_inv), "grid": i["grid"], "area": 0}
        for i in grid_rect
    ]
    ori_store_rect = [
        {
            "rect": transform_rect(store_rect[rect_index], M_inv),
            "grid": (-(rect_index + 1), -(rect_index + 1)),
            "area": rect_index + 1,
        }
        for rect_index in range(len(store_rect))
    ]

    # One large region covering grid + stores; used as the piece-detection ROI
    hole_area = [
        {
            "rect": transform_rect(
                (
                    (grid_center[0], grid_center[1]),
                    (grid_size * 3.2, grid_size * 5),
                    90,
                ),
                M_inv,
            ),
            "grid": (-3, -3),
            "area": 9,
        }
    ]

    return ori_grid_rect + ori_store_rect + hole_area


def calculate_circle_mean(frame, center, radius):
    """Mean intensity (first channel only) of *frame* inside the given
    circle, truncated to int."""
    circle_mask = np.zeros(frame.shape[:2], dtype=np.uint8)
    cv2.circle(circle_mask, center, radius, 255, -1)
    mean_per_channel = cv2.mean(frame, mask=circle_mask)
    return int(mean_per_channel[0])


def filter_close_points_grid(points, min_distance):
    """Drop points that lie within *min_distance* of an earlier kept point.

    Uses a spatial hash grid (cell size = min_distance) so each point only
    needs to be compared against candidates in its own and the 8 adjacent
    cells, instead of every previously kept point.

    Parameters:
        points: list of dicts, each with a "center" (x, y) entry.
        min_distance: minimum allowed spacing between kept points.

    Returns:
        List of the kept point dicts (despite the internal name, the dicts
        themselves are returned, not indices).
    """
    grid_size = min_distance
    grid = {}  # (cell_x, cell_y) -> indices (into points) kept in that cell
    keep_indices = []

    for i in range(len(points)):
        (x, y) = points[i]["center"]

        grid_x, grid_y = int(x // grid_size), int(y // grid_size)
        conflict = False

        # Check the point's own cell plus its 8 neighbors
        for dx in [-1, 0, 1]:
            for dy in [-1, 0, 1]:
                for j in grid.get((grid_x + dx, grid_y + dy), ()):
                    # exact Euclidean distance check
                    if (
                        np.linalg.norm(
                            np.asarray(points[i]["center"], dtype=np.float32)
                            - np.asarray(points[j]["center"], dtype=np.float32)
                        )
                        < min_distance
                    ):
                        conflict = True
                        break
                if conflict:
                    break
            if conflict:
                break

        if not conflict:
            keep_indices.append(points[i])
            # fixed: was `== None`, which is both unidiomatic and wrong for
            # array-like centers; identity check is the correct test
            if points[i]["center"] is None:
                logger.error("None center")

            grid.setdefault((grid_x, grid_y), []).append(i)

    return keep_indices


def get_all_pieces(image):
    """Detect circular game pieces via EdgeDrawing ellipse detection and
    classify each as dark (type 0) or bright (type 1).

    Returns:
        List of {"center": (x, y), "axes": radius, "type": 0|1}; empty list
        when no ellipses are found.
    """
    ed = cv2.ximgproc.createEdgeDrawing()
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    ed.detectEdges(gray)
    ellipses = ed.detectEllipses()

    if ellipses is None:
        return []

    # Keyed by the mean brightness of the inner half-radius disc.
    # NOTE(review): two pieces with identical mean brightness collide on the
    # same key and one silently overwrites the other — confirm acceptable.
    pieces = {}

    for i in range(len(ellipses)):
        center = (int(ellipses[i][0][0]), int(ellipses[i][0][1]))
        r = ellipses[i][0][2]

        # discard tiny ellipses as noise
        if r < 10:
            continue

        pieces.update(
            {
                calculate_circle_mean(image, center, int(r * 0.5)): {
                    "center": center,
                    "axes": int(r),
                    "type": None,
                }
            }
        )

    brightness = list(pieces.keys())
    brightness.sort()

    # Split the sorted brightness values at the first jump > 20: values at or
    # below the jump are dark pieces (type 0), above are bright (type 1).
    brightness_diff = np.diff(brightness)
    change_point = np.where(brightness_diff > 20)[0]

    for key_index in range(len(brightness)):
        if len(change_point) < 1:
            # no clear split: classify every piece by the overall average
            pieces[brightness[key_index]]["type"] = (
                0 if np.average(brightness) < 100 else 1
            )
            continue
        pieces[brightness[key_index]]["type"] = 0 if key_index <= change_point[0] else 1

    return list(pieces.values())


def is_point_in_rect(point, rotated_rect):
    """Return True when *point* lies inside (or on the edge of) the rotated
    rectangle ((cx, cy), (w, h), angle-in-degrees)."""
    (cx, cy), (w, h), angle = rotated_rect

    # Express the point in the rectangle's local frame: translate so the rect
    # center is the origin, then rotate by -angle so the rect becomes
    # axis-aligned.
    tx = point[0] - cx
    ty = point[1] - cy

    theta = np.radians(-angle)
    c = np.cos(theta)
    s = np.sin(theta)
    local_x = tx * c - ty * s
    local_y = tx * s + ty * c

    # In the local frame the test is a plain half-extent comparison.
    return abs(local_x) <= w / 2 and abs(local_y) <= h / 2


def get_all_pieces_detail(rects, pieces):
    """Assign each detected piece to every board rect containing its center.

    A matched piece absorbs the rect's fields (grid/area/rect) via update and
    is appended once per matching rect; an unmatched piece is tagged with
    area 3 and grid None.
    """
    detail = []
    for piece in pieces:
        # TODO: shadow should be filtered out

        matched = False
        for rect in rects:
            # area 9 is the whole-board ROI region; never assign pieces to it
            if rect["area"] == 9:
                continue

            if is_point_in_rect(piece["center"], rect["rect"]):
                matched = True
                piece.update(rect)
                detail.append(piece)

        if not matched:
            # center fell outside every region: mark as off-board (area 3)
            piece.update({"area": 3, "grid": None})
            detail.append(piece)

    return detail


def get_real_posi(from_center=None, image_size=(480, 640), use_cali=True):
    """Map a pixel coordinate to the robot's real-world coordinate frame.

    Parameters:
        from_center: (x, y) pixel position; when None, only the combined
            2x3 affine matrix is returned.
        image_size: (height, width) of the frame the transform applies to.
        use_cali: when True, run the point through calculate_target_position
            first.

    Returns:
        The 2x3 combined transform matrix when from_center is None;
        otherwise a (real_x, real_y) tuple rounded to 0.1.
    """
    global ori_scale

    # 180-degree rotation about the image center, composed with a vertical
    # flip, collapsed into a single 2x3 affine matrix.
    M = cv2.getRotationMatrix2D((image_size[1] / 2, image_size[0] / 2), 180, 1)
    M_3x3 = np.vstack([M, [0, 0, 1]])
    M_flip = np.array([[1, 0, 0], [0, -1, image_size[0]], [0, 0, 1]], dtype=np.float32)
    M_combined = (M_3x3 @ M_flip)[:2]

    # fixed: was `from_center == None`; identity comparison is correct and
    # avoids elementwise surprises for array-like inputs
    if from_center is None:
        return M_combined

    if use_cali:
        from_center = calculate_target_position(from_center)
    result = np.int64(
        np.dot(
            M_combined,
            np.array([from_center[0], from_center[1], 1]),
        )[:2]
    )
    logger.debug(f"M {from_center} -> {result}")
    # NOTE(review): ori_scale must have been set by get_main_status() first;
    # it is None at import time and would raise TypeError here — confirm
    # call ordering.
    return (
        round((int(-result[1])) * ori_scale + REAL_OFFSET[0], 1),
        round((int(result[0])) * ori_scale + REAL_OFFSET[1], 1),
    )

def find_available_place(rotated_rect, target_radius, objects, max_attempts=10000):
    """
    Find a center position where a circle of target_radius can be placed
    inside a rotated rectangle without overlapping any existing object,
    by rejection sampling.

    Parameters:
        rotated_rect: rotated rect ((center_x, center_y), (width, height), angle)
        target_radius: radius of the circle to place
        objects: obstacle list, each {'center': (x, y), 'axes': radius}
        max_attempts: maximum number of random samples (default 10000)

    Returns:
        An available center (x, y), or None when no spot was found.
    """
    # Corner points of the rotated rectangle
    rect_points = cv2.boxPoints(rotated_rect)
    rect_contour = rect_points.reshape((-1, 1, 2)).astype(np.float32)

    # Axis-aligned bounding box of the rotated rect, used to bound sampling
    x, y, w, h = cv2.boundingRect(rect_contour)

    for _ in range(max_attempts):
        # Sample a random point inside the bounding box
        px = random.uniform(x, x + w)
        py = random.uniform(y, y + h)
        point = (px, py)

        # Reject samples outside the rotated rectangle itself
        if cv2.pointPolygonTest(rect_contour, point, measureDist=False) < 0:
            continue

        # Reject samples too close to any existing object circle
        collision = False
        for obj in objects:
            obj_x, obj_y = obj["center"]
            obj_r = obj["axes"]
            # Euclidean distance
            distance = np.hypot(px - obj_x, py - obj_y)
            if distance < (obj_r + target_radius + 1e-6):  # float-error guard
                collision = True
                break
        if not collision:
            return (px, py)

    # Gave up after max_attempts.
    # TODO(review): a None return leaves callers with no recovery — must be
    # avoided upstream (e.g. by clearing space before calling).
    return None


def get_main_status(cap):
    """Grab up to 30 frames from *cap*, detect board regions and pieces, and
    return the deduplicated board state.

    Parameters:
        cap: an opened cv2.VideoCapture.

    Returns:
        (point_detail, area_rects) where point_detail is a list of piece
        dicts each augmented with a "real" (x, y) position, and area_rects
        maps grid key -> region dict from the last processed frame.

    Side effects: sets the module-global ori_scale from the origin marker's
    pixel size; reopens /dev/video20 on read failure; shows a debug overlay
    when VISION_DEBUG is enabled.

    NOTE(review): if no frame produces an area==9 region, `roi` is unbound
    (NameError); likewise `r1`/`dbg_img` if the loop body never completes —
    currently masked only by the surrounding bare excepts. Confirm upstream
    guarantees.
    """
    global ori_scale
    point_detail = []
    ori_ang = []
    ori_pix = []

    frame_count = 0

    # discard a few frames so exposure/white balance can settle
    for _ in range(5):
        ret, frame = cap.read()

    while frame_count < 30 and loopok():
        ret, frame = cap.read()

        # cv2.imshow("frame", frame)
        # input("debug")

        if not ret:
            logger.error("No frame in camera")
            cap.release()
            # reopen the fixed camera device and retry on the next iteration
            cap = cv2.VideoCapture("/dev/video20")
            continue

        # flip both axes (code -1): frame arrives rotated 180 degrees
        frame = cv2.flip(frame, -1)
        dbg_img = frame.copy()

        img_size = frame.shape

        try:
            ori_posi, board_mark_up, board_mark_down = detect_marks(frame)
        except:
            logger.error(f"Mark detect error: {traceback.format_exc()}")
            continue

        r1 = get_all_rect(board_mark_up, board_mark_down)
        # debug colors indexed by area id (0=grid, 1/2=stores, 9=hole)
        c = [(255, 0, 0), (0, 255, 0), (0, 255, 0), 0, 0, 0, 0, 0, 0, (0, 0, 255)]
        # draw the corner box of every region
        for rect in r1:
            box = cv2.boxPoints(rect["rect"])
            box = np.int64(box)
            cv2.drawContours(dbg_img, [box], 0, c[rect["area"]], 2)

            if rect["area"] == 9:
                # the hole region becomes the ROI for piece detection
                mask = get_mask(rect["rect"], img_size)
                roi = cv2.bitwise_and(frame, frame, mask=mask)

        pieces = get_all_pieces(roi)
        point_detail += get_all_pieces_detail(r1, pieces)

        # accumulate origin-marker angle and both side lengths (pixels) for
        # the scale estimate below
        ori_rect = cv2.minAreaRect(ori_posi["detail"])
        ori_ang.append(ori_rect[2])
        ori_pix.append(ori_rect[1][0])
        ori_pix.append(ori_rect[1][1])

        frame_count += 1

    # drop duplicate detections of the same piece across frames (< 20 px apart)
    point_detail = filter_close_points_grid(point_detail, 20)

    logger.debug(f"point_detail got num: {len(point_detail)}")

    M_combined = get_real_posi()

    # units per pixel — presumably the origin marker is 10 units wide; TODO
    # confirm the physical marker size
    ori_scale = 10 / int(np.average(ori_pix))
    logger.debug(f"get ori_scale: {ori_scale}")

    # convert every piece center to real-world coordinates
    for i, point in enumerate(point_detail):
        calied = calculate_target_position(point["center"])
        real_posi = np.int64(
            np.dot(
                M_combined,
                np.array([calied[0], calied[1], 1]),
            )[:2]
        )
        point_detail[i].update(
            {
                "real": (
                    round((int(-real_posi[1])) * ori_scale + REAL_OFFSET[0], 1),
                    round((int(real_posi[0])) * ori_scale + REAL_OFFSET[1], 1),
                )
            }
        )

    # index regions by their grid key (last frame's regions win)
    area_rects = {}
    for rect in r1:
        area_rects.update({rect["grid"]: rect})

    # best-effort debug overlay: circles plus grid/area, real and pixel labels
    try:
        for circle in point_detail:
            cv2.circle(
                dbg_img,
                circle["center"],
                circle["axes"],
                (0, 255, 0) if circle["type"] else (255, 0, 0),
                2,
            )
            cv2.putText(
                dbg_img,
                f"{str(circle['grid'])}:{str(circle['area'])}",
                (circle["center"][0] - 10, circle["center"][1] - 12),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.5,
                (255, 255, 0),
                2,
            )
            cv2.putText(
                dbg_img,
                f"{str(circle['real'])}",
                (circle["center"][0] - 10, circle["center"][1] + 3),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.5,
                (255, 255, 0),
                2,
            )
            cv2.putText(
                dbg_img,
                f"{str(circle['center'])}",
                (circle["center"][0] - 10, circle["center"][1] + 17),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.5,
                (255, 255, 0),
                2,
            )
        imshow("dbg", dbg_img)
    except:
        logger.warning("dbg image failed")

    return point_detail, area_rects
