# -*- coding: utf-8 -*-
import numpy as np
import cv2
import matplotlib.pyplot as plt
import config  # 👈 引入配置文件

# ========== 图像处理函数 ==========
def grayscale(img):
    """Convert an RGB image to a single-channel grayscale image."""
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    return gray

def gaussian_blur(img, kernel_size):
    """Smooth the image with a square Gaussian kernel of the given (odd) size."""
    ksize = (kernel_size, kernel_size)
    return cv2.GaussianBlur(img, ksize, 0)

def canny(img, low_threshold, high_threshold):
    """Detect edges with the Canny algorithm using the given hysteresis thresholds."""
    edges = cv2.Canny(img, low_threshold, high_threshold)
    return edges

def region_of_interest(img, ratio_top, ratio_bottom, width_ratio):
    """Keep only a centered rectangular band of the image; black out the rest.

    The band spans vertically from ``ratio_top * h`` to ``ratio_bottom * h``
    and horizontally a centered fraction ``width_ratio`` of the full width.
    """
    height, width = img.shape[:2]
    left_x = int(width * (1 - width_ratio) / 2)
    right_x = int(width * (1 + width_ratio) / 2)
    top_y = int(height * ratio_top)
    bottom_y = int(height * ratio_bottom)
    polygon = np.array(
        [[(left_x, bottom_y), (left_x, top_y), (right_x, top_y), (right_x, bottom_y)]],
        dtype=np.int32,
    )

    mask = np.zeros_like(img)
    # Multi-channel images need a per-channel fill value.
    fill_color = (255,) * img.shape[2] if img.ndim > 2 else 255
    cv2.fillPoly(mask, polygon, fill_color)
    return cv2.bitwise_and(img, mask)

def draw_lanes(img, lines, color, thickness):
    """Fit one straight lane through the given segments and draw it on *img*.

    Each entry in *lines* is an (x1, y1, x2, y2) segment. All endpoints are
    pooled and a first-degree polynomial x = f(y) is fitted (x as a function
    of y keeps near-vertical lanes numerically stable), then drawn from the
    image bottom up to the ROI top. Draws in place; returns None.
    """
    if not lines:
        return
    x_coords, y_coords = [], []
    for x1, y1, x2, y2 in lines:
        x_coords.extend([x1, x2])
        y_coords.extend([y1, y2])

    try:
        if len(set(y_coords)) == 1:
            # All points share one y: fitting x = f(y) would be degenerate,
            # so draw the horizontal line directly.
            y_avg = int(np.mean(y_coords))
            cv2.line(img, (0, y_avg), (img.shape[1], y_avg), color, thickness)
        else:
            poly = np.polyfit(y_coords, x_coords, 1)
            y1, y2 = img.shape[0], int(img.shape[0] * config.ROI_RATIO_TOP)
            x1 = int(poly[0] * y1 + poly[1])
            x2 = int(poly[0] * y2 + poly[1])
            cv2.line(img, (x1, y1), (x2, y2), color, thickness)
    except (ValueError, TypeError, OverflowError, np.linalg.LinAlgError):
        # Best-effort: a bad fit (NaN/inf coordinates, non-convergence) skips
        # this lane for the frame instead of crashing the whole pipeline.
        # The previous bare `except:` also swallowed KeyboardInterrupt/SystemExit.
        pass

def draw_lines(img, lines, color_left, color_right, thickness):
    """Split Hough segments into left/right lanes by slope, then draw each lane.

    Segments with slope in (-5, -0.5) belong to the left lane, (0.5, 5) to the
    right; anything flatter or steeper (including vertical) is discarded.
    """
    if lines is None:
        return

    left_segs, right_segs = [], []
    for line in lines:
        for seg in line:
            x1, y1, x2, y2 = seg
            dx = x2 - x1
            if dx == 0:
                continue  # vertical segment: slope is undefined
            slope = (y2 - y1) / dx
            if -5 < slope < -0.5:
                left_segs.append((x1, y1, x2, y2))
            elif 0.5 < slope < 5:
                right_segs.append((x1, y1, x2, y2))

    for segments, color in ((left_segs, color_left), (right_segs, color_right)):
        draw_lanes(img, segments, color, thickness)

def hough_lines(img):
    """Run probabilistic Hough on an edge image; return a black image with lanes drawn."""
    theta = np.pi / 180 * config.HOUGH_THETA
    detected = cv2.HoughLinesP(
        img,
        config.HOUGH_RHO,
        theta,
        config.HOUGH_THRESHOLD,
        np.array([]),
        minLineLength=config.HOUGH_MIN_LINE_LEN,
        maxLineGap=config.HOUGH_MAX_LINE_GAP,
    )
    h, w = img.shape[:2]
    overlay = np.zeros((h, w, 3), dtype=np.uint8)
    if detected is not None:
        draw_lines(overlay, detected, config.LEFT_LANE_COLOR,
                   config.RIGHT_LANE_COLOR, config.DRAW_THICKNESS)
    return overlay

def weighted_img(img, initial_img, alpha=0.8, beta=1., gamma=0.):
    """Blend the overlay onto the original: initial_img*alpha + img*beta + gamma."""
    blended = cv2.addWeighted(initial_img, alpha, img, beta, gamma)
    return blended

# ========== 主处理流程 ==========
def pipeline(img):
    """Full lane-detection chain: gray -> blur -> Canny -> ROI mask -> Hough -> blend.

    Accepts an RGB image (array-like); returns the frame with lanes overlaid.
    """
    frame = np.array(img)
    edges = canny(
        gaussian_blur(grayscale(frame), config.GAUSSIAN_KERNEL_SIZE),
        config.CANNY_LOW_THRESHOLD,
        config.CANNY_HIGH_THRESHOLD,
    )
    masked = region_of_interest(
        edges,
        config.ROI_RATIO_TOP,
        config.ROI_RATIO_BOTTOM,
        config.ROI_WIDTH_RATIO,
    )
    return weighted_img(hough_lines(masked), frame)

# ========== 图片处理 ==========
def processImage():
    """Detect lanes on the still image at config.IMAGE_PATH and display it."""
    print("🖼️ 正在处理图片...")
    image = cv2.imread(config.IMAGE_PATH)
    if image is None:
        print(f"❌ 无法读取图片: {config.IMAGE_PATH}")
        return

    try:
        rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        result = pipeline(rgb)
        result_bgr = cv2.cvtColor(result, cv2.COLOR_RGB2BGR)

        # Show the annotated image until any key is pressed.
        cv2.imshow("Lane Detection - Image Mode", result_bgr)
        print("✅ 图片处理完成，按任意键关闭窗口")
        cv2.waitKey(0)
        cv2.destroyAllWindows()

        # Optionally persist the result:
        # cv2.imwrite("./output_image_result.jpg", result_bgr)

    except Exception as e:
        print(f"⚠️ 图片处理出错: {e}")
        # Fall back to showing the raw, unprocessed image.
        cv2.imshow("Lane Detection - Image Mode", image)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

# ========== 视频处理 ==========
def processVideo():
    """Play the video at config.VIDEO_PATH with a lane-detection overlay.

    Press Q to quit. Every iteration ends in cv2.waitKey, which both pumps
    the GUI event loop (required for imshow to actually render) and polls
    the quit key — including on frames whose processing failed.
    """
    cap = cv2.VideoCapture(config.VIDEO_PATH)
    if not cap.isOpened():
        print("❌ 无法打开视频文件")
        return

    print(f"▶️ 正在播放车道检测效果... 按 Q 退出")
    frame_count = 0

    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                print("✅ 视频播放结束")
                break

            try:
                frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                processed = pipeline(frame_rgb)
                display = cv2.cvtColor(processed, cv2.COLOR_RGB2BGR)

                if config.DISPLAY_FRAME_INFO:
                    cv2.putText(
                        display,
                        f'Frame: {frame_count}',
                        (10, 30),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        1,
                        (255, 255, 255),
                        2
                    )
            except Exception as e:
                print(f"⚠️ 第 {frame_count} 帧处理出错: {e}")
                display = frame  # fall back to the raw frame

            # Previously the error path called imshow without waitKey, so the
            # fallback frame never rendered and Q could not interrupt a
            # stream of failing frames; frame_count also stalled.
            cv2.imshow(config.OUTPUT_WINDOW_NAME, display)
            if cv2.waitKey(config.WAIT_KEY_DELAY) & 0xFF == ord('q'):
                break

            frame_count += 1
    finally:
        # Release the capture even if an unexpected error escapes the loop.
        cap.release()
        cv2.destroyAllWindows()

# ========== 摄像头实时处理 ==========


def processCamera():
    """Run real-time lane detection on camera config.CAMERA_ID until Q is pressed.

    Mirrors processVideo: every iteration ends in cv2.waitKey so failing
    frames still render and the quit key always works.
    """
    cap = cv2.VideoCapture(config.CAMERA_ID)
    if not cap.isOpened():
        print(f"❌ 无法打开摄像头 (ID: {config.CAMERA_ID})，请检查设备或权限")
        return

    # Optional: fix the capture resolution (performance/quality trade-off).
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)

    print(f"📹 正在使用摄像头 (ID: {config.CAMERA_ID}) 进行实时车道检测...")
    print("▶️ 按 Q 键退出")

    frame_count = 0

    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                print("⚠️ 无法读取摄像头画面，可能设备被占用或断开")
                break

            try:
                frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                processed = pipeline(frame_rgb)
                display = cv2.cvtColor(processed, cv2.COLOR_RGB2BGR)

                if config.DISPLAY_FRAME_INFO:
                    cv2.putText(
                        display,
                        f'Frame: {frame_count} | Camera: {config.CAMERA_ID}',
                        (10, 30),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        0.8,
                        (255, 255, 255),
                        2
                    )
            except Exception as e:
                print(f"⚠️ 第 {frame_count} 帧处理出错: {e}")
                display = frame  # show the raw frame when processing fails

            # waitKey pumps the GUI event loop and polls the quit key; doing
            # it outside the try fixes the error path, which previously
            # called imshow without waitKey (no render, no way to quit).
            cv2.imshow(config.OUTPUT_WINDOW_NAME, display)
            if cv2.waitKey(config.WAIT_KEY_DELAY) & 0xFF == ord('q'):
                print("⏹️ 用户主动退出")
                break

            frame_count += 1
    finally:
        # Always free the camera device, even on unexpected errors.
        cap.release()
        cv2.destroyAllWindows()
        print("✅ 摄像头已关闭")


# ========== 主入口 ==========

if __name__ == "__main__":
    # Pick the run mode from config flags; video mode is the default.
    if config.IS_CAMERA_MODE:
        banner, runner = "🎬 启动摄像头模式...", processCamera
    elif config.IS_IMAGE_MODE:
        banner, runner = "🖼️ 启动图片模式...", processImage
    else:
        banner, runner = "🎬 启动视频模式...", processVideo
    print(banner)
    runner()