# A_ocr_capture.py（增强版 V2）
# 新增功能：图像增强（对比度/红色饱和度）+ 模拟A4尺寸裁剪

import cv2
import os
import base64
import json
import datetime
import numpy as np  # 导入numpy用于复杂的数组计算
from openai import OpenAI

# ====== Path configuration (all absolute paths) ======
BASE_DIR = r"E:\沙粒云\自媒体\2025视频制作\20250610ai高考\流程\anwser\data"
IMAGE_FOLDER = r"E:\沙粒云\自媒体\2025视频制作\20250610ai高考\流程\aigaokao_ui\pic"
OUTPUT_FULL_TEXT = os.path.join(BASE_DIR, "output_full_exam.txt")
LOG_FILE = os.path.join(BASE_DIR, "ocr_log.txt")

# ====== Qwen API configuration ======
# SECURITY: an API key was hard-coded here and is now checked into source.
# Prefer setting the DASHSCOPE_API_KEY environment variable; the literal below
# is kept only as a backward-compatible fallback and should be rotated.
API_KEY = os.environ.get("DASHSCOPE_API_KEY", "sk-c48712427ba84b59b9c89fc2883e48b6")
BASE_URL = "https://dashscope.aliyuncs.com/compatible-mode/v1"
QWEN_VL_MODEL = "qwen-vl-max"


# ====== Initialization ======
# Ensure the capture output folder exists. NOTE(review): BASE_DIR itself is
# assumed to already exist -- the log-file write below would fail otherwise;
# confirm on deployment.
os.makedirs(IMAGE_FOLDER, exist_ok=True)
# Truncate/reset the log file at every program start so each run logs cleanly.
with open(LOG_FILE, "w", encoding="utf-8") as f:
    f.write(f"[{datetime.datetime.now()}] 日志文件创建/重置\n")

# OpenAI-compatible client pointed at DashScope's Qwen endpoint.
qwen_client = OpenAI(api_key=API_KEY, base_url=BASE_URL)

def log(msg: str) -> None:
    """Print *msg* with a timestamp and append the same line to LOG_FILE."""
    stamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    entry = f"[{stamp}] {msg}"
    print(entry)
    # Open in append mode per call so the file is never held open long-term.
    with open(LOG_FILE, "a", encoding="utf-8") as handle:
        handle.write(entry + "\n")

def encode_image_to_base64(path: str) -> str:
    """Read the file at *path* and return its bytes as a base64 ASCII string."""
    with open(path, "rb") as handle:
        raw = handle.read()
    return base64.b64encode(raw).decode("utf-8")

def get_image_mime_type(path: str) -> str:
    """Return the MIME type for a supported image extension.

    Supports .jpg/.jpeg, .png and .webp (case-insensitive); raises
    ValueError for anything else.
    """
    mime_by_ext = {
        ".jpg": "image/jpeg",
        ".jpeg": "image/jpeg",
        ".png": "image/png",
        ".webp": "image/webp",
    }
    ext = os.path.splitext(path)[1].lower()
    mime = mime_by_ext.get(ext)
    if mime is None:
        raise ValueError(f"Unsupported image format: {ext}")
    return mime

# ==================================================================
# ====================== 新增的图像处理函数 ========================
# ==================================================================

def enhance_image(image):
    """Boost contrast, amplify red saturation, and mute all other colors.

    Args:
        image: BGR uint8 image (as produced by OpenCV capture).

    Returns:
        A new BGR uint8 image: contrast raised (alpha=1.3), saturation
        doubled inside the red hue bands and reduced to 60% elsewhere.
    """
    # 1. Raise contrast (alpha > 1); beta=0 leaves brightness unchanged.
    enhanced = cv2.convertScaleAbs(image, alpha=1.3, beta=0)

    # 2. Work in HSV so saturation can be adjusted independently of hue/value.
    h, s, v = cv2.split(cv2.cvtColor(enhanced, cv2.COLOR_BGR2HSV))

    # 3. Red wraps around 0 in OpenCV's 0-179 hue range, so it needs two
    #    bands: ~0-10 and ~170-179. Combine them with bitwise_or instead of
    #    uint8 `+`, which would wrap around if the bands ever overlapped.
    red_mask = cv2.bitwise_or(
        cv2.inRange(h, 0, 10),
        cv2.inRange(h, 170, 180),
    )

    # 4. Double saturation on red pixels, scale it to 60% everywhere else.
    #    Multiplying by a float promotes to float64, so clip then cast back.
    boosted = np.clip(s * 2.0, 0, 255).astype(np.uint8)
    muted = np.clip(s * 0.6, 0, 255).astype(np.uint8)
    s_adjusted = np.where(red_mask > 0, boosted, muted)

    # 5. Reassemble the channels and convert back to BGR.
    return cv2.cvtColor(cv2.merge([h, s_adjusted, v]), cv2.COLOR_HSV2BGR)

def process_for_a4(image):
    """Rescale the image to an A4-like width and crop fixed top/bottom margins.

    A4 is 210mm x 297mm; at the chosen 2100px target width, 1mm maps to 10px.
    Returns the resized-but-uncropped image if the frame is too short to crop.
    """
    TARGET_WIDTH = 2100   # pixels spanning the 210mm A4 width -> 10 px per mm
    TOP_CROP_MM = 62      # millimeters trimmed from the top edge
    BOTTOM_CROP_MM = 10   # millimeters trimmed from the bottom edge

    # Scale proportionally so the width becomes exactly TARGET_WIDTH.
    h, w, _ = image.shape
    scale = TARGET_WIDTH / w
    resized = cv2.resize(
        image, (TARGET_WIDTH, int(h * scale)), interpolation=cv2.INTER_AREA
    )

    # Translate the millimeter margins into pixel counts.
    px_per_mm = TARGET_WIDTH / 210  # = 10
    top_px = int(TOP_CROP_MM * px_per_mm)
    bottom_px = int(BOTTOM_CROP_MM * px_per_mm)

    final_h, final_w, _ = resized.shape
    start_row = top_px
    end_row = final_h - bottom_px

    # Guard: if the margins would consume the whole image, skip cropping.
    if start_row >= end_row:
        log(f"警告：图片高度太小({final_h}px)，无法按要求裁剪，将返回未裁剪的图片。")
        return resized

    log(f"图片已处理：缩放至({final_w}, {final_h})，裁剪掉顶部{top_px}px和底部{bottom_px}px。")
    return resized[start_row:end_row, :]

# ==================================================================
# ==================================================================
# ==================================================================

def ocr_whole_page_stream(image_path: str, page_num: int) -> str:
    """OCR one page image with Qwen-VL (streaming) and return the full text.

    Streams partial results to stdout and LOG_FILE as they arrive.
    """
    log(f"Step: 开始对第 {page_num} 页图片做 OCR -> {image_path}")
    mime = get_image_mime_type(image_path)
    b64 = encode_image_to_base64(image_path)
    data_url = f"data:{mime};base64,{b64}"

    messages = [
        {
            "role": "system",
            "content": [{"type": "text", "text": "你是一个试卷识别助手，请准确提取试卷中的所有文字内容，不要添加任何解释或说明，直接输出试卷原文。"}]
        },
        {
            "role": "user",
            "content": [
                {"type": "image_url", "image_url": {"url": data_url}},
                {"type": "text", "text": "请准确提取这张试卷中的所有文字内容"}
            ]
        }
    ]

    stream = qwen_client.chat.completions.create(
        model=QWEN_VL_MODEL,
        messages=messages,
        stream=True,
    )

    # Collect streamed fragments; echo each one to stdout and the log file.
    pieces = []
    for chunk in stream:
        fragment = getattr(chunk.choices[0].delta, "content", None)
        if fragment:
            print(fragment, end="", flush=True)
            with open(LOG_FILE, "a", encoding="utf-8") as logf:
                logf.write(fragment)
            pieces.append(fragment)
    print()

    page_text = "".join(pieces)
    log(f"Step: 第 {page_num} 页 OCR 完成，共 {len(page_text)} 字")
    return page_text

def capture_images_only() -> list:
    """Interactively capture page photos from the default camera.

    SPACE captures the current frame (rotated, enhanced, A4-cropped, saved
    as <page_num>.jpg under IMAGE_FOLDER); ESC ends the session.

    Returns:
        List of saved image file paths, in capture order (empty if the
        camera cannot be opened or nothing was captured).
    """
    cap = cv2.VideoCapture(0)
    if not cap.isOpened():
        log("错误：无法打开摄像头")
        return []

    # Request 4K capture; the driver may silently fall back to a lower
    # resolution if unsupported -- TODO confirm on target hardware.
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 3840)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 2160)

    log("Step: 摄像头已启动，按 空格 拍摄（自动旋转），按 ESC 结束。")
    saved_paths = []
    page_num = 1

    while True:
        ret, frame = cap.read()
        if not ret:
            log("警告：摄像头读取失败。")
            break
        # Show a 1/3-scale preview so the full-resolution frame fits on screen.
        preview = cv2.resize(frame, (0, 0), fx=0.33, fy=0.33)
        cv2.imshow("按 空格 拍照，ESC 结束", preview)
        key = cv2.waitKey(1)
        if key == 27:  # ESC
            log("Step: ESC 检测到，结束拍摄。")
            break
        elif key == 32:  # SPACE
            log(f"--- 开始处理第 {page_num} 页 ---")
            # 1. Rotate 90 degrees counter-clockwise (presumably the camera
            #    is mounted sideways -- verify against the physical setup).
            rotated_frame = cv2.rotate(frame, cv2.ROTATE_90_COUNTERCLOCKWISE)
            log("Step 1: 图片已旋转90度。")
            
            # 2. Enhance contrast / red saturation.
            enhanced_frame = enhance_image(rotated_frame)
            log("Step 2: 图像已增强（对比度/饱和度）。")

            # 3. Rescale and crop to simulate A4 proportions.
            final_frame = process_for_a4(enhanced_frame)
            log("Step 3: 图像已按A4比例裁剪。")

            # 4. Name and save the fully processed page image.
            img_name = f"{page_num}.jpg"
            img_path = os.path.join(IMAGE_FOLDER, img_name)
            cv2.imwrite(img_path, final_frame) # persist the processed frame
            
            log(f"✅ Step 4: 保存第 {page_num} 页 -> {img_path}")
            saved_paths.append(img_path)
            page_num += 1

    cap.release()
    cv2.destroyAllWindows()
    return saved_paths

def main():
    """Capture pages from the camera, OCR each one, and write the merged text."""
    captured = capture_images_only()
    if not captured:
        log("未拍摄任何图片，程序退出。")
        return

    # OCR page by page, tagging each section with its 1-based page number.
    sections = []
    for page_no, img_path in enumerate(captured, start=1):
        log(f"=================== 开始 OCR: 第 {page_no} 页 ===================")
        page_text = ocr_whole_page_stream(img_path, page_no)
        sections.append(f"【第{page_no}页】\n{page_text}\n\n")

    log("Step: 所有页识别完成，写入最终文本文件。")
    with open(OUTPUT_FULL_TEXT, "w", encoding="utf-8") as out:
        out.write("".join(sections))
    log(f"Step: OCR 输出已保存 -> {OUTPUT_FULL_TEXT}")
    log("✅ A 代码全部执行完毕。")

# Script entry point: run the capture-then-OCR pipeline.
if __name__ == "__main__":
    main()