# pytesseract import and configuration removed (OCR is handled by PaddleOCR below)
import cv2
import numpy as np
import argparse
from ultralytics import YOLO
from PIL import Image
from paddleocr import PaddleOCR, draw_ocr
from PIL import Image, ImageDraw, ImageFont

def order_points(pts):
    """Order a quadrilateral's four corners as: top-left, top-right,
    bottom-right, bottom-left.

    Uses the classic sum/difference trick: the top-left corner has the
    smallest x+y, the bottom-right the largest; the top-right has the
    smallest y-x, the bottom-left the largest.
    """
    ordered = np.zeros((4, 2), dtype="float32")
    coord_sums = pts.sum(axis=1)
    coord_diffs = np.diff(pts, axis=1)
    # Top-left / bottom-right from the coordinate sums.
    ordered[0] = pts[np.argmin(coord_sums)]
    ordered[2] = pts[np.argmax(coord_sums)]
    # Top-right / bottom-left from the coordinate differences (y - x).
    ordered[1] = pts[np.argmin(coord_diffs)]
    ordered[3] = pts[np.argmax(coord_diffs)]
    return ordered

def four_point_transform(image, pts):
    """Apply a four-point perspective transform to *image*.

    *pts* are the four corners of the region to flatten (any order);
    returns a top-down, rectangular view of that region.
    """
    corners = order_points(pts)
    (tl, tr, br, bl) = corners

    def edge_length(p, q):
        # Euclidean distance between two corners (same arithmetic as the
        # classic widthA/heightA formulation, kept for identical rounding).
        return np.sqrt(((p[0] - q[0]) ** 2) + ((p[1] - q[1]) ** 2))

    # Output size: the longer of each opposing pair of edges.
    max_width = max(int(edge_length(br, bl)), int(edge_length(tr, tl)))
    max_height = max(int(edge_length(tr, br)), int(edge_length(tl, bl)))

    # Destination rectangle matching the computed output size.
    dst = np.array(
        [
            [0, 0],
            [max_width - 1, 0],
            [max_width - 1, max_height - 1],
            [0, max_height - 1],
        ],
        dtype="float32",
    )

    # Warp the source quad onto the destination rectangle.
    transform = cv2.getPerspectiveTransform(corners, dst)
    return cv2.warpPerspective(image, transform, (max_width, max_height))

def enhance_document(image, block_size=25, c=10):
    """Binarize a BGR document image with adaptive thresholding.

    Args:
        image: BGR image (as returned by cv2.imread).
        block_size: neighborhood size for the adaptive threshold; must be
            an odd integer >= 3 (cv2 requirement). Default 25 matches the
            original hard-coded value.
        c: constant subtracted from the local mean. Default 10.

    Returns:
        A single-channel binary image.

    Raises:
        ValueError: if block_size is even or smaller than 3.
    """
    # cv2.adaptiveThreshold silently misbehaves on invalid block sizes;
    # validate up front instead.
    if block_size < 3 or block_size % 2 == 0:
        raise ValueError("block_size must be an odd integer >= 3")

    # Convert to grayscale before thresholding.
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # Gaussian-weighted adaptive threshold handles uneven lighting better
    # than a single global threshold.
    binary = cv2.adaptiveThreshold(
        gray,
        255,
        cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
        cv2.THRESH_BINARY,
        block_size,
        c,
    )

    return binary

def detect_document_with_yolo(image, class_ids=(73, 74, 75, 76, 77)):
    """Detect the document's bounding box in *image* using YOLOv8.

    Args:
        image: BGR image to run detection on.
        class_ids: COCO class IDs accepted as "document-like". Default
            keeps the original set.
            NOTE(review): in COCO, 73 is "book" but 74-77 are clock, vase,
            scissors and teddy bear — confirm whether IDs beyond 73 are
            really intended here.

    Returns:
        A (4, 2) int32 array of the largest accepted box's corners
        (tl, tr, br, bl order), or None if nothing matched.
    """
    # Cache the model on the function: reloading yolov8n.pt on every call
    # is very expensive (original code reloaded it each time).
    model = getattr(detect_document_with_yolo, "_model", None)
    if model is None:
        model = YOLO('yolov8n.pt')
        detect_document_with_yolo._model = model

    # Run inference.
    results = model(image)
    boxes = results[0].boxes

    accepted = set(class_ids)

    # Keep the largest accepted detection, assuming the biggest box is
    # the document.
    max_area = 0
    document_box = None
    for box in boxes:
        if int(box.cls) not in accepted:
            continue
        x1, y1, x2, y2 = box.xyxy[0].cpu().numpy()
        area = (x2 - x1) * (y2 - y1)
        if area > max_area:
            max_area = area
            document_box = np.array(
                [[x1, y1], [x2, y1], [x2, y2], [x1, y2]], dtype=np.int32
            )

    return document_box

def process_ocr(image, font_path="C:/Windows/Fonts/simhei.ttf"):
    """Run PaddleOCR on *image* and paint the recognized text over each
    detected region.

    Args:
        image: BGR image to OCR.
        font_path: TrueType font used to render the recognized text.

    Returns:
        A BGR image with each detected text region whited out and the
        recognized text drawn on top. If nothing is detected, the input
        is returned unchanged (as a converted copy).
    """
    # Cache the OCR engine on the function: constructing PaddleOCR loads
    # models from disk and is far too slow to repeat per call.
    ocr = getattr(process_ocr, "_ocr", None)
    if ocr is None:
        ocr = PaddleOCR(use_angle_cls=True, lang="ch")
        process_ocr._ocr = ocr

    result = ocr.ocr(image, cls=True)

    # Prepare a PIL image for text rendering (BGR -> RGB).
    image_pil = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))

    # PaddleOCR returns [None] when no text is found; the original code
    # crashed iterating it. Bail out cleanly instead.
    if not result or result[0] is None:
        return cv2.cvtColor(np.array(image_pil), cv2.COLOR_RGB2BGR)

    draw = ImageDraw.Draw(image_pil)

    for line in result[0]:
        box = np.array(line[0], dtype=np.int32)
        text = line[1][0]

        # Scale the font to the box height, capped at 12pt as before, but
        # clamped to >= 1 — ImageFont.truetype rejects size 0.
        text_height = max(point[1] for point in box) - min(point[1] for point in box)
        font_size = max(1, min(int(text_height * 0.8), 12))
        font = ImageFont.truetype(font_path, font_size)

        # White out the original text region, then draw the recognized text.
        draw.polygon([tuple(p) for p in box], fill=(255, 255, 255))
        x = min(point[0] for point in box)
        y = min(point[1] for point in box) + 2
        draw.text((x, y), text, font=font, fill=(0, 0, 0))

    # Convert back to OpenCV BGR format.
    return cv2.cvtColor(np.array(image_pil), cv2.COLOR_RGB2BGR)

def save_results(base_path, cropped_image, ocr_result):
    """Save the cropped document and the OCR visualization as JPEGs.

    Args:
        base_path: output path prefix (extension-less input path).
        cropped_image: the cropped document image.
        ocr_result: the OCR visualization image.

    Raises:
        IOError: if a write fails — cv2.imwrite only returns False on
        failure and never raises, so the original silently lost data.
    """
    outputs = {
        f"{base_path}_cropped.jpg": cropped_image,
        f"{base_path}_ocr.jpg": ocr_result,
    }
    for path, img in outputs.items():
        if not cv2.imwrite(path, img):
            raise IOError(f"Failed to write {path}")
    print("处理结果已保存")

def detect_document(image_path):
    """Detect, crop, OCR and display the document found in *image_path*.

    Shows the cropped document next to its OCR visualization, and saves
    both next to the input file.

    Raises:
        ValueError: if the image cannot be read.
    """
    # Load the input image.
    image = cv2.imread(image_path)
    if image is None:
        raise ValueError("无法读取图像文件")

    # Create the display window.
    cv2.namedWindow("Document Scanner", cv2.WINDOW_NORMAL)

    # Locate the document with YOLO.
    document_box = detect_document_with_yolo(image)

    if document_box is not None:
        h, w = image.shape[:2]

        # BUG FIX: crop BEFORE drawing the contour. The original drew the
        # green outline onto `image` first, so the outline pixels were
        # baked into the crop fed to OCR. Also clamp indices to the image
        # bounds to guard against out-of-range detections.
        x1, y1 = np.min(document_box, axis=0)
        x2, y2 = np.max(document_box, axis=0)
        x1, y1 = max(int(x1), 0), max(int(y1), 0)
        x2, y2 = min(int(x2), w), min(int(y2), h)
        cropped_document = image[y1:y2, x1:x2].copy()

        # Now draw the detection outline for display purposes only.
        document_contour = document_box.reshape(-1, 1, 2)
        cv2.drawContours(image, [document_contour], -1, (0, 255, 0), 2)

        # Run OCR on the clean crop.
        ocr_result = process_ocr(cropped_document)

        # Resize both results to the original frame size and show them
        # side by side.
        cropped_resized = cv2.resize(cropped_document, (w, h))
        ocr_resized = cv2.resize(ocr_result, (w, h))
        cv2.imshow("Document Scanner", np.hstack([cropped_resized, ocr_resized]))

        # Persist results next to the input file.
        base_path = image_path.rsplit('.', 1)[0]
        save_results(base_path, cropped_document, ocr_result)
    else:
        print("未能检测到文档")
        cv2.imshow("Document Scanner", image)

    cv2.waitKey(0)
    cv2.destroyAllWindows()

def main():
    """CLI entry point: scan the image given on the command line.

    The image path is optional and defaults to the original hard-coded
    path, keeping previous behavior when run without arguments. This
    finally puts the (previously unused) `argparse` import to work.
    """
    parser = argparse.ArgumentParser(
        description="Scan a document image with YOLO detection and PaddleOCR"
    )
    parser.add_argument(
        "image",
        nargs="?",
        default="D:\\code\\video-scan\\resource\\test01.jpg",
        help="path to the input image",
    )
    args = parser.parse_args()

    try:
        detect_document(args.image)
    except Exception as e:
        # Top-level boundary: report and exit gracefully.
        print(f"处理图像时出错: {str(e)}")

if __name__ == "__main__":
    main()