import os
import fitz  # PyMuPDF
from pathlib import Path
import sys
import json
import pymysql
from PIL import Image
import io
# Legacy import path for older DBUtils (<2.0): from dbutils.pool import PooledDB
from dbutils.pooled_db import PooledDB
# --- Database configuration (adjust to the deployment environment) ---
# NOTE(review): credentials are hard-coded in source — consider loading them
# from environment variables or a config file before deploying.
DB_CONFIG = {
    'host': 'localhost',
    'user': 'root',
    'password': '123456',
    'database': 'superbackend',
    'charset': 'utf8mb4',
    'maxconnections': 10  # maximum number of connections kept by the pool
}

# --- Global connection pool (lazily created by get_db_pool) ---
db_pool = None

def get_db_pool():
    """Return the shared database connection pool, creating it on first use."""
    global db_pool
    if db_pool is not None:
        return db_pool
    # Lazy initialization: the pool is built only when first requested.
    db_pool = PooledDB(creator=pymysql, **DB_CONFIG)
    return db_pool

def load_existing_image_paths(folder_key: str) -> set:
    """Fetch every image_path already stored for *folder_key*.

    Loading the whole set up front lets callers deduplicate in memory
    instead of issuing one query per image. On query failure an empty
    set is returned (deduplication then degrades gracefully).
    """
    connection = get_db_pool().connection()
    paths = set()
    try:
        cursor = connection.cursor()
        try:
            cursor.execute(
                "SELECT image_path FROM images WHERE folder_key = %s AND status = 1",
                (folder_key,),
            )
            paths = {record[0] for record in cursor.fetchall()}
        finally:
            cursor.close()
    except Exception as e:
        print(f"[DB Load Error] 加载已有图片路径失败: {e}", file=sys.stderr)
    finally:
        connection.close()
    return paths

def save_images_batch(batch_data: list):
    """Bulk-insert image records and return the number of rows inserted.

    Each element of *batch_data* is a tuple of
    (pdf_uuid, folder_key, image_size, image_path, status). The batch is
    committed atomically; on failure it is rolled back and 0 is returned
    (the error is logged rather than raised, so one bad batch does not
    abort the whole extraction run).
    """
    if not batch_data:
        return 0  # fix: previously returned None, breaking the int-count contract
    pool = get_db_pool()
    conn = pool.connection()
    inserted_count = 0
    try:
        with conn.cursor() as cursor:
            sql = '''
            INSERT INTO images (pdf_uuid, folder_key, image_size, image_path, status)
            VALUES (%s, %s, %s, %s, %s)
            '''
            cursor.executemany(sql, batch_data)
        conn.commit()
        inserted_count = len(batch_data)
    except Exception as e:
        conn.rollback()
        print(f"[DB Batch Error] 批量插入失败: {e}", file=sys.stderr)
    finally:
        conn.close()
    return inserted_count

def process_single_pdf(pdf_path: Path, output_path: Path, folder_key: str, existing_paths: set):
    """Extract images from one PDF into *output_path*.

    Two strategies are applied per page:
      1. Pages with more than 3 embedded images are rendered as a single
         full-page PNG screenshot (avoids saving many tiny fragments).
      2. Otherwise, each embedded image larger than 20 KB is extracted.

    *existing_paths* is consulted — and updated in place — to skip images
    already recorded in the database. Returns a stats dict; rows pending
    insertion are collected in stats['batch_data'] for the caller to
    flush in batches.
    """
    pdf_name = pdf_path.stem
    stats = {
        'saved': 0,          # embedded images written to disk
        'skipped_imgs': 0,   # images skipped (too small or already present)
        'fullpages': 0,      # full-page screenshots written
        'batch_data': []     # rows pending DB insertion
    }

    # Cheap whole-PDF skip check: if this PDF's first image is already
    # recorded, assume the entire PDF was processed in a previous run.
    # NOTE(review): this only matches when image #1 was a full-page .png;
    # a PDF whose first saved image had another extension is re-scanned.
    first_image_path_db = f"/img/{folder_key}/{pdf_name}_1.png"
    if first_image_path_db in existing_paths:
        return stats

    doc = None
    try:
        doc = fitz.open(pdf_path)
        image_counter = 1

        for page_num in range(len(doc)):
            page = doc.load_page(page_num)
            image_list = page.get_images(full=True)

            # --- Case 1: many images on the page -> take one screenshot ---
            if len(image_list) > 3:
                try:
                    # Render at 150 DPI via a zoom matrix (PDF base is 72 DPI).
                    dpi = 150
                    zoom = dpi / 72
                    mat = fitz.Matrix(zoom, zoom)
                    pix = page.get_pixmap(matrix=mat, alpha=False, colorspace=fitz.csRGB, annots=False)

                    # PPM decodes quickly in Pillow compared with PNG round-trips.
                    img_data = pix.tobytes("ppm")
                    img = Image.open(io.BytesIO(img_data))

                    fullpage_filename = f"{pdf_name}_{image_counter}.png"
                    fullpage_path = output_path / fullpage_filename
                    image_path_for_db = f"/img/{folder_key}/{fullpage_filename}"

                    # Deduplicate against both the filesystem and the DB set.
                    if not fullpage_path.exists() and image_path_for_db not in existing_paths:
                        img.save(str(fullpage_path), 'PNG')
                        image_size = os.path.getsize(fullpage_path)
                        stats['batch_data'].append((pdf_name, folder_key, image_size, image_path_for_db, 1))
                        existing_paths.add(image_path_for_db)  # keep the in-memory set current
                        stats['fullpages'] += 1
                        image_counter += 1

                    pix = None  # release pixmap memory promptly
                    continue  # page handled via screenshot; skip embedded extraction
                except Exception as e:
                    print(f"[Page Screenshot Error] PDF: {pdf_name}, Page: {page_num}, Error: {e}", file=sys.stderr)
                    # Screenshot failed: fall through to embedded-image extraction.

            # --- Case 2: extract the page's embedded images individually ---
            for img_index, img in enumerate(image_list):
                try:
                    xref = img[0]
                    base_image = doc.extract_image(xref)
                    image_bytes = base_image["image"]
                    image_ext = base_image["ext"]

                    if len(image_bytes) <= 20 * 1024:  # ignore images up to 20 KB (icons, rules, etc.)
                        stats['skipped_imgs'] += 1
                        continue

                    # Normalise extensions to their conventional forms.
                    if image_ext.lower() in ['jpeg', 'jpg']:
                        image_ext = 'jpg'
                    elif image_ext.lower() == 'tif':
                        image_ext = 'tiff'

                    image_filename = f"{pdf_name}_{image_counter}.{image_ext}"
                    image_file_path = output_path / image_filename
                    image_path_for_db = f"/img/{folder_key}/{image_filename}"

                    # Deduplicate against both the filesystem and the DB set.
                    if not image_file_path.exists() and image_path_for_db not in existing_paths:
                        with open(image_file_path, "wb") as f:
                            f.write(image_bytes)
                        image_size = os.path.getsize(image_file_path)
                        stats['batch_data'].append((pdf_name, folder_key, image_size, image_path_for_db, 1))
                        existing_paths.add(image_path_for_db)  # keep the in-memory set current
                        stats['saved'] += 1
                        image_counter += 1
                    else:
                        stats['skipped_imgs'] += 1

                except Exception as e:
                    print(f"[Image Extract Error] PDF: {pdf_name}, Page: {page_num}, Img Index: {img_index}, Error: {e}", file=sys.stderr)
                    # One broken image must not abort the page; continue.

    except Exception as e:
        print(f"[PDF Process Error] 处理PDF '{pdf_path}' 失败: {e}", file=sys.stderr)
    finally:
        if doc:
            doc.close()  # always release the document handle

    return stats

def main():
    """CLI entry point: extract images from every PDF in a folder.

    argv[1] — input folder containing PDFs
    argv[2] — output root directory for extracted images

    Progress and diagnostics go to stderr; the final result is a single
    JSON object on stdout so a parent process can parse the outcome.
    Exits 1 on invalid arguments, 0 otherwise.
    """
    if len(sys.argv) < 3:
        print(json.dumps({
            "status": "error",
            "message": "参数不足，需要输入文件夹路径和输出根目录路径"
        }))
        sys.exit(1)

    input_folder = Path(sys.argv[1])
    output_root = Path(sys.argv[2])

    if not input_folder.exists() or not input_folder.is_dir():
        print(json.dumps({
            "status": "error",
            "message": f"输入文件夹路径无效: {input_folder}"
        }))
        sys.exit(1)

    output_root.mkdir(parents=True, exist_ok=True)

    pdf_files = list(input_folder.glob("*.pdf"))  # glob is more robust than manual listdir filtering

    if not pdf_files:
        print(json.dumps({
            "status": "error",
            "message": "没有找到PDF文件",
            "total_files": 0,
            "success_count": 0,
            "error_count": 0,
            "skipped_count": 0,
            "total_saved": 0,
            "total_skipped": 0,
            "total_fullpages": 0
        }))
        sys.exit(0)

    total_files = len(pdf_files)
    success_count = 0  # PDFs that produced at least one image
    error_count = 0  # PDFs that raised an unexpected error in the loop below
    skipped_count = 0  # PDFs with no output (already processed or no usable images)
    total_saved = 0  # embedded images extracted
    total_skipped = 0  # images skipped (too small / duplicates)
    total_fullpages = 0  # full-page screenshots taken
    all_batch_data = []  # DB rows pending insertion, accumulated across PDFs
    batch_insert_size = 50  # flush to the DB once this many rows accumulate

    folder_key = input_folder.name

    # --- 1. Preload image paths already recorded for this folder ---
    print(f"[Init] Loading existing image paths for folder '{folder_key}'...", file=sys.stderr)
    existing_image_paths = load_existing_image_paths(folder_key)
    print(f"[Init] Loaded {len(existing_image_paths)} existing paths.", file=sys.stderr)

    # --- 2. Process each PDF in turn ---
    for i, pdf_file in enumerate(pdf_files):
        print(f"[Progress] Processing ({i+1}/{total_files}): {pdf_file.name}", file=sys.stderr)
        try:
            stats = process_single_pdf(pdf_file, output_root, folder_key, existing_image_paths)

            # Accumulate rows pending insertion
            all_batch_data.extend(stats['batch_data'])

            # Update running totals
            total_saved += stats['saved']
            total_skipped += stats['skipped_imgs']
            total_fullpages += stats['fullpages']

            # A PDF counts as "successful" when it produced any output
            if stats['saved'] + stats['fullpages'] > 0:
                success_count += 1
            else:
                skipped_count += 1  # likely already processed, or no usable images

            # --- 3. Flush accumulated rows once the batch is large enough ---
            if len(all_batch_data) >= batch_insert_size:
                print(f"[DB] Flushing batch of {len(all_batch_data)} records...", file=sys.stderr)
                save_images_batch(all_batch_data)
                all_batch_data.clear()  # drop the rows that were just inserted

        except Exception as e:
            error_count += 1
            print(f"[Main Loop Error] 处理PDF '{pdf_file.name}' 时发生未预期错误: {e}", file=sys.stderr)

    # --- 4. Flush whatever remains in the final partial batch ---
    if all_batch_data:
        print(f"[DB] Flushing final batch of {len(all_batch_data)} records...", file=sys.stderr)
        save_images_batch(all_batch_data)
        all_batch_data.clear()

    # --- 5. Emit the final statistics as JSON on stdout ---
    result = {
        "status": "completed",
        "total_files": total_files,
        "success_count": success_count,
        "error_count": error_count,
        "skipped_count": skipped_count,  # PDFs that were skipped
        "total_saved": total_saved,
        "total_skipped": total_skipped,  # small/duplicate images skipped
        "total_fullpages": total_fullpages
    }
    print(json.dumps(result, ensure_ascii=False))
    sys.exit(0)

# Script entry point: run only when executed directly, not when imported.
if __name__ == '__main__':
    main()



