import cv2
import os
import numpy as np
import argparse
from ultralytics import YOLO
from PIL import Image
from paddleocr import PaddleOCR, draw_ocr
from PIL import Image, ImageDraw, ImageFont


def detect_document_with_yolo(image):
    """Locate the document region in an image using a YOLOv8 detector.

    Args:
        image: BGR image (numpy array) as produced by OpenCV.

    Returns:
        A (4, 2) int32 numpy array with the corners of the largest accepted
        detection box, ordered [[x1, y1], [x2, y1], [x2, y2], [x1, y2]],
        or None when no accepted class was detected.
    """
    # Lazily load the pretrained model once and cache it on the function
    # object -- this function is called per frame, and reloading the
    # weights from disk every call is very expensive.
    model = getattr(detect_document_with_yolo, "_model", None)
    if model is None:
        model = YOLO('yolov8n.pt')
        detect_document_with_yolo._model = model

    # Run inference; ultralytics returns one Results object per image.
    results = model(image)
    boxes = results[0].boxes

    # Keep the largest accepted box (assumes the document dominates the frame).
    # NOTE(review): in COCO, class 73 is "book"; 74-77 are clock, vase,
    # scissors and teddy bear. Kept from the original heuristic -- confirm
    # the extra ids are intentional.
    max_area = 0
    document_box = None
    for box in boxes:
        cls = int(box.cls)
        if cls in [73, 74, 75, 76, 77]:
            x1, y1, x2, y2 = box.xyxy[0].cpu().numpy()
            area = (x2 - x1) * (y2 - y1)
            if area > max_area:
                max_area = area
                document_box = np.array(
                    [[x1, y1], [x2, y1], [x2, y2], [x1, y2]], dtype=np.int32
                )

    return document_box

def enhance_document(image):
    """Binarize a BGR document image with adaptive Gaussian thresholding.

    Args:
        image: BGR image (numpy array).

    Returns:
        Single-channel binary image (0/255 values).
    """
    grayscale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Block size 25 and offset 10 match the original tuning for printed text.
    return cv2.adaptiveThreshold(
        grayscale, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 25, 10
    )

def calculate_similarity(img1, img2):
    """Return the correlation between the 3-D color histograms of two images.

    Both images are resized to 300x300 before comparison; the result is the
    HISTCMP_CORREL score (1.0 for identical histograms).
    """
    size = (300, 300)
    resized_a = cv2.resize(img1, size)
    resized_b = cv2.resize(img2, size)

    def _normalized_hist(img):
        # 8 bins per BGR channel over the full 0-255 range.
        hist = cv2.calcHist([img], [0, 1, 2], None, [8, 8, 8],
                            [0, 256, 0, 256, 0, 256])
        return cv2.normalize(hist, hist).flatten()

    return cv2.compareHist(_normalized_hist(resized_a),
                           _normalized_hist(resized_b),
                           cv2.HISTCMP_CORREL)

def process_ocr(image, font_path="C:/Windows/Fonts/simhei.ttf"):
    """Run PaddleOCR on a BGR image and render the recognized text onto it.

    Args:
        image: BGR image (numpy array).
        font_path: TrueType font used for rendering (defaults to SimHei so
            Chinese glyphs display on Windows).

    Returns:
        Tuple of (all recognized lines joined with newlines,
        BGR image with each text region painted white and the recognized
        text drawn on top).
    """
    # Run recognition with angle classification enabled for rotated text.
    ocr = PaddleOCR(use_angle_cls=True, lang="ch")
    result = ocr.ocr(image, cls=True)

    # Draw via PIL because OpenCV cannot render CJK glyphs.
    image_pil = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
    draw = ImageDraw.Draw(image_pil)

    texts = []
    if result[0]:
        for line in result[0]:
            box = np.array(line[0], dtype=np.int32)
            text = line[1][0]
            texts.append(text)

            # Scale the font to the detected box height, capped at 12pt and
            # clamped to at least 1pt -- ImageFont.truetype rejects
            # non-positive sizes, which the original could produce for
            # very short boxes.
            text_height = max(point[1] for point in box) - min(point[1] for point in box)
            font_size = max(1, min(int(text_height * 0.8), 12))
            font = ImageFont.truetype(font_path, font_size)

            # Blank out the original region, then draw the recognized text.
            draw.polygon([tuple(p) for p in box], fill=(255, 255, 255))
            x = min(point[0] for point in box)
            y = min(point[1] for point in box) + 2
            draw.text((x, y), text, font=font, fill=(0, 0, 0))

    # Convert back to OpenCV's BGR layout for saving with cv2.imwrite.
    ocr_image = cv2.cvtColor(np.array(image_pil), cv2.COLOR_RGB2BGR)

    return '\n'.join(texts), ocr_image

def evaluate_image_quality(image):
    """Estimate image sharpness with four complementary focus measures.

    Args:
        image: BGR or grayscale image (numpy array).

    Returns:
        Tuple of (weighted overall score in [0, 100],
        dict mapping metric name to its score normalized into [0, 100]).
    """
    # Work on a single-channel image.
    if len(image.shape) == 3:
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    else:
        gray = image.copy()

    scores = {}

    # 1. Variance of the Laplacian -- the classic blur detector.
    laplacian = cv2.Laplacian(gray, cv2.CV_64F)
    scores['laplacian'] = laplacian.var()

    # 2. Mean Sobel gradient magnitude.
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=3)
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=3)
    scores['gradient'] = np.mean(np.sqrt(sobelx**2 + sobely**2))

    # 3. Tenengrad: mean of the squared Sobel gradient magnitude.
    #    (The original reused the Laplacian here, which duplicated metric #1
    #    instead of implementing Tenengrad.)
    scores['tenengrad'] = np.mean(sobelx**2 + sobely**2)

    # 4. Frequency-domain energy: mean log-magnitude of the DFT.
    dft = cv2.dft(np.float32(gray), flags=cv2.DFT_COMPLEX_OUTPUT)
    dft_shift = np.fft.fftshift(dft)
    # The small epsilon avoids log(0) -> -inf for zero-magnitude bins.
    magnitude = cv2.magnitude(dft_shift[:, :, 0], dft_shift[:, :, 1])
    magnitude_spectrum = 20 * np.log(magnitude + 1e-8)
    scores['frequency'] = np.mean(magnitude_spectrum)

    # Empirical per-metric maxima used to clamp raw scores into [0, 100].
    max_scores = {
        'laplacian': 1000,  # tune to the footage if needed
        'gradient': 50,
        'tenengrad': 500,
        'frequency': 100
    }

    normalized_scores = {
        k: min(100, max(0, v * 100 / max_scores[k]))
        for k, v in scores.items()
    }

    # Weighted blend of the four metrics (weights are adjustable).
    weights = {
        'laplacian': 0.3,
        'gradient': 0.3,
        'tenengrad': 0.2,
        'frequency': 0.2
    }

    final_score = sum(normalized_scores[k] * weights[k] for k in normalized_scores)

    return final_score, normalized_scores

def extract_frames(video_path, output_folder, rotate=True, similarity_threshold=0.98, quality_threshold=60):
    """Extract unique, sharp frames from a video and OCR the document in each.

    Pipeline: read frames -> keep those above ``quality_threshold`` ->
    collapse runs of near-duplicate frames (keeping the sharpest of each
    run) -> detect the document with YOLO, crop it, OCR it, and save the
    original frame, the crop, the text, and an OCR visualization.

    Args:
        video_path: Path to the input video file.
        output_folder: Directory where all artifacts are written
            (created if missing).
        rotate: Rotate each frame 90 degrees clockwise before processing.
        similarity_threshold: Histogram correlation above which two frames
            are considered duplicates (0-1).
        quality_threshold: Minimum sharpness score (0-100) for a frame
            to be kept.
    """
    os.makedirs(output_folder, exist_ok=True)

    # Read frames and keep only the sharp ones.
    cap = cv2.VideoCapture(video_path)
    frame_count = 0
    temp_frames = []
    quality_scores = []

    print("正在读取视频帧并评估质量...")
    while True:
        ret, frame = cap.read()
        if not ret:
            break

        if rotate:
            frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)

        quality_score, _detailed_scores = evaluate_image_quality(frame)

        if quality_score >= quality_threshold:
            temp_frames.append(frame)
            quality_scores.append(quality_score)
            print(f"帧 {frame_count}: 质量得分 {quality_score:.2f}")
        else:
            print(f"帧 {frame_count}: 质量得分 {quality_score:.2f} - 已跳过")

        frame_count += 1

    cap.release()

    # Collapse consecutive near-duplicate frames, keeping the sharpest
    # frame of each similar run.
    unique_frames = []
    unique_scores = []
    i = 0

    print("\n正在去除重复帧...")
    while i < len(temp_frames):
        current_frame = temp_frames[i]
        best_frame_idx = i
        best_score = quality_scores[i]

        # Walk forward while frames stay similar to the run's first frame.
        j = i + 1
        while j < len(temp_frames):
            similarity = calculate_similarity(current_frame, temp_frames[j])
            if similarity > similarity_threshold:
                if quality_scores[j] > best_score:
                    best_frame_idx = j
                    best_score = quality_scores[j]
                j += 1
            else:
                break

        unique_frames.append(temp_frames[best_frame_idx])
        unique_scores.append(best_score)
        i = j

    print(f"\n总帧数: {frame_count}")
    print(f"清晰帧数: {len(temp_frames)}")
    print(f"去重后保存帧数: {len(unique_frames)}")

    # Detect, crop, OCR and save each unique frame.
    for idx, frame in enumerate(unique_frames):
        document_box = detect_document_with_yolo(frame)

        if document_box is None:
            print(f"帧 {idx} 未检测到文档")
            continue

        # Crop the axis-aligned bounding box of the detected document.
        x1, y1 = np.min(document_box, axis=0)
        x2, y2 = np.max(document_box, axis=0)
        cropped_document = frame[int(y1):int(y2), int(x1):int(x2)]

        frame_name = f'frame_{idx:04d}'
        frame_path = os.path.join(output_folder, f'{frame_name}.jpg')
        cropped_path = os.path.join(output_folder, f'{frame_name}_cropped.jpg')
        cv2.imwrite(frame_path, frame)
        cv2.imwrite(cropped_path, cropped_document)

        # Single OCR pass: process_ocr returns (joined text, visualization).
        # The original ran OCR twice and then passed process_ocr's *tuple*
        # return value straight into cv2.imwrite, which fails.
        text, ocr_image = process_ocr(cropped_document)

        text_path = os.path.join(output_folder, f'{frame_name}.txt')
        ocr_path = os.path.join(output_folder, f'{frame_name}_ocr.jpg')

        with open(text_path, 'w', encoding='utf-8') as f:
            f.write(text)
        cv2.imwrite(ocr_path, ocr_image)

    print(f"OCR结果已保存到对应的txt文件中")

def main():
    """CLI entry point: parse arguments and run frame extraction."""
    parser = argparse.ArgumentParser(description='从视频中提取帧并进行文档识别和OCR')

    parser.add_argument('--video', type=str, required=True,
                      help='输入视频文件的路径')
    parser.add_argument('--output', type=str, required=True,
                      help='输出文件夹路径')
    parser.add_argument('--similarity', type=float, default=0.98,
                      help='帧相似度阈值，范围0-1，默认0.98')
    parser.add_argument('--rotate', action='store_true',
                      help='是否需要旋转视频帧90度')
    # Default was 95, contradicting both the help text and the
    # extract_frames default of 60 -- aligned to 60.
    parser.add_argument('--quality', type=float, default=60,
                      help='图像质量阈值，范围0-100，默认60')

    args = parser.parse_args()

    try:
        extract_frames(
            video_path=args.video,
            output_folder=args.output,
            rotate=args.rotate,
            similarity_threshold=args.similarity,
            quality_threshold=args.quality
        )
    except Exception as e:
        # CLI boundary: report the failure instead of a raw traceback.
        print(f"处理视频时出错: {str(e)}")

r"""
Example usage:
python d:\code\video-scan\opencv-document-scanner\myself_pdf_image\extract_frames.py --video D:\code\video-scan\resource\翻书视频.mp4 --output D:\code\video-scan\frames --rotate --similarity 0.98
"""
# Script entry point: run the CLI when executed directly.
if __name__ == "__main__":
    main()


