import os
import time
import uuid
import traceback
from argparse import ArgumentParser
from collections import defaultdict

import fitz  # fitz就是pip install PyMuPDF
import numpy as np  # Added import for numpy
from PIL import Image
from rapidocr_onnxruntime import RapidOCR
from tqdm import tqdm

from src.utils import logger

# NOTE(review): name is misspelled ("GOLBAL" -> "GLOBAL") but is referenced by
# get_state() below; renaming requires touching all call sites at once.
GOLBAL_STATE = {}

# OCR service monitoring counters: per-service request totals, failure totals,
# and the last-known status string ("healthy" / "error").
OCR_STATS = {"requests": defaultdict(int), "failures": defaultdict(int), "service_status": defaultdict(str)}


def log_ocr_request(service_name: str, file_path: str, success: bool, processing_time: float, error_msg: str = None):
    """Record one OCR request in the global OCR_STATS counters and log the outcome.

    :param service_name: backend identifier (e.g. "rapid_ocr", "mineru_ocr").
    :param file_path: file that was processed; only its basename is logged.
    :param success: whether the request succeeded.
    :param processing_time: elapsed seconds (currently accepted but not recorded).
    :param error_msg: failure detail, logged when success is False.
    """
    OCR_STATS["requests"][service_name] += 1

    if success:
        OCR_STATS["service_status"][service_name] = "healthy"
        logger.info(f"OCR成功 - {service_name}: {os.path.basename(file_path)}")
    else:
        OCR_STATS["failures"][service_name] += 1
        OCR_STATS["service_status"][service_name] = "error"
        logger.error(f"OCR失败 - {service_name}: {os.path.basename(file_path)} - {error_msg}")


def get_ocr_stats():
    """Summarize the global OCR_STATS counters per service.

    :return: dict mapping service name to totals, success/failure counts,
             a formatted success rate, and the last-known status.
    """
    summary = {}
    for service, total in OCR_STATS["requests"].items():
        failure_count = OCR_STATS["failures"][service]
        success_count = total - failure_count
        # Guard against a zero total (possible if the defaultdict was touched
        # without incrementing).
        rate = success_count / total if total > 0 else 0

        summary[service] = {
            "total_requests": total,
            "success_count": success_count,
            "failure_count": failure_count,
            "success_rate": f"{rate:.2%}",
            "status": OCR_STATS["service_status"][service],
        }

    return summary


class OCRServiceException(Exception):
    """Raised when an OCR backend is unavailable or fails to process a request.

    Attributes:
        service_name: identifier of the failing OCR backend, or None.
        status_code: short machine-readable failure code, or None.
    """

    def __init__(self, message, service_name=None, status_code=None):
        self.service_name = service_name
        self.status_code = status_code
        super().__init__(message)


class OCRPlugin:
    """OCR plugin wrapping several backends.

    Backends:
        - RapidOCR (local ONNX models): ``process_image`` / ``process_pdf``
        - MinerU (remote service): ``process_file_mineru``
        - PaddleX (remote service): ``process_file_paddlex``
        - Unstructured (local hi-res parsing + VLM image analysis):
          ``process_file_unstructured``
    """

    def __init__(self, **kwargs):
        """
        :param kwargs: optional settings; ``det_box_thresh`` tunes the RapidOCR
                       detection box threshold (default 0.3).
        """
        # Loaded lazily on first OCR call (see load_model).
        self.ocr = None
        self.det_box_thresh = kwargs.get("det_box_thresh", 0.3)
        # Model root differs between host and container deployments.
        self.model_dir_root = (
            os.getenv("MODEL_DIR") if not os.getenv("RUNNING_IN_DOCKER") else os.getenv("MODEL_DIR_IN_DOCKER")
        )

    def _rapid_ocr_model_paths(self):
        """Return (model_dir, det_model_path, rec_model_path) for RapidOCR.

        Shared by the availability check and the loader so the paths cannot
        drift apart.
        """
        model_dir = os.path.join(self.model_dir_root, "SWHL/RapidOCR")
        det_model_dir = os.path.join(model_dir, "PP-OCRv4/ch_PP-OCRv4_det_infer.onnx")
        rec_model_dir = os.path.join(model_dir, "PP-OCRv4/ch_PP-OCRv4_rec_infer.onnx")
        return model_dir, det_model_dir, rec_model_dir

    def _check_rapid_ocr_availability(self):
        """Verify the RapidOCR model files exist on disk.

        :return: True when all model files are present.
        :raises OCRServiceException: when the model directory or a model file
                                     is missing, or the check itself fails.
        """
        try:
            model_dir, det_model_dir, rec_model_dir = self._rapid_ocr_model_paths()

            if not os.path.exists(model_dir):
                raise OCRServiceException(
                    f"模型目录不存在: {model_dir}。请下载 SWHL/RapidOCR 模型", "rapid_ocr", "model_not_found"
                )

            if not os.path.exists(det_model_dir) or not os.path.exists(rec_model_dir):
                raise OCRServiceException(
                    f"模型文件缺失。请确认模型文件完整: {det_model_dir}, {rec_model_dir}",
                    "rapid_ocr",
                    "model_incomplete",
                )

            return True

        except OCRServiceException:
            raise
        except Exception as e:
            raise OCRServiceException(f"RapidOCR模型检查失败: {str(e)}", "rapid_ocr", "check_failed")

    def load_model(self):
        """Load the RapidOCR model. Called lazily on the first OCR request.

        :raises OCRServiceException: when model files are missing or loading fails.
        """
        logger.info("加载 OCR 模型，仅在第一次调用时加载")

        # Fail fast if the model files are not on disk.
        self._check_rapid_ocr_availability()

        _, det_model_dir, rec_model_dir = self._rapid_ocr_model_paths()

        try:
            # BUGFIX: det_box_thresh was previously hard-coded to 0.3 here,
            # silently ignoring the value passed to __init__.
            self.ocr = RapidOCR(
                det_box_thresh=self.det_box_thresh,
                det_model_path=det_model_dir,
                rec_model_path=rec_model_dir,
            )
            logger.info(f"OCR Plugin for det_box_thresh = {self.det_box_thresh} loaded.")
        except Exception as e:
            raise OCRServiceException(f"RapidOCR模型加载失败: {str(e)}", "rapid_ocr", "load_failed")

    def process_image(self, image, params=None):
        """Run OCR over a single image and return the extracted text.

        :param image: one of
                      - str: path to an image file,
                      - PIL.Image: PIL image object,
                      - numpy.ndarray: image array.
        :param params: unused, kept for interface symmetry with other backends.
        :return: extracted text, lines joined by "\\n"; "" when nothing was recognized.
        :raises OCRServiceException: when OCR processing fails.
        """
        # Lazy model load on first use.
        if self.ocr is None:
            self.load_model()

        image_path = image if isinstance(image, str) else None
        is_temp_file = False
        try:
            if image_path is None:
                # In-memory image: persist to a temp file for the OCR engine.
                is_temp_file = True
                image_path = self._create_temp_image_file(image)

            start_time = time.time()
            result, _ = self.ocr(image_path)
            processing_time = time.time() - start_time

            # RapidOCR returns a list of [box, text, score] entries (or None).
            if result:
                text = "\n".join([line[1] for line in result])
                log_ocr_request("rapid_ocr", image_path, True, processing_time)
                return text

            log_ocr_request("rapid_ocr", image_path, False, processing_time, "OCR未能识别出文本内容")
            return ""

        except Exception as e:
            error_msg = f"OCR处理失败: {str(e)}"
            # image_path may still be None if temp-file creation failed;
            # previously this raised NameError inside the handler.
            log_ocr_request("rapid_ocr", image_path or "<in-memory image>", False, 0, error_msg)
            logger.error(error_msg)
            raise OCRServiceException(error_msg, "rapid_ocr", "processing_failed")
        finally:
            # BUGFIX: the temp file was leaked when the OCR call raised.
            if is_temp_file and image_path and os.path.exists(image_path):
                os.remove(image_path)

    def _create_temp_image_file(self, image):
        """Save in-memory image data to a temp PNG file.

        :param image: PIL.Image or numpy.ndarray image data.
        :return: path of the created temp file (caller must delete it).
        :raises ValueError: for unsupported image types.
        """
        # Ensure the temp directory exists.
        tmp_dir = os.path.join(os.getcwd(), "tmp")
        os.makedirs(tmp_dir, exist_ok=True)

        # Random suffix avoids collisions between concurrent requests.
        temp_filename = f"ocr_temp_{uuid.uuid4().hex[:8]}.png"
        image_path = os.path.join(tmp_dir, temp_filename)

        if isinstance(image, Image.Image):
            image.save(image_path)
        elif isinstance(image, np.ndarray):
            Image.fromarray(image).save(image_path)
        else:
            raise ValueError("不支持的图像类型，必须是PIL.Image或numpy数组")

        return image_path

    def process_pdf(self, pdf_path, params=None):
        """Rasterize a PDF page by page and OCR each page with RapidOCR.

        :param pdf_path: path to the PDF file.
        :param params: unused, kept for interface symmetry with other backends.
        :return: extracted text, pages joined by blank lines; "" on processing error.
        :raises FileNotFoundError: if pdf_path does not exist.
        """
        if not os.path.exists(pdf_path):
            raise FileNotFoundError(f"PDF file not found: {pdf_path}")

        try:
            page_images = []

            # Render each page at 2x zoom for better OCR accuracy.
            # Context manager ensures the document is closed (previously leaked).
            with fitz.open(pdf_path) as pdf_doc:
                total_pages = pdf_doc.page_count
                for pg in tqdm(range(total_pages), desc="to images", ncols=100):
                    page = pdf_doc[pg]
                    rotate, zoom_x, zoom_y = 0, 2, 2
                    mat = fitz.Matrix(zoom_x, zoom_y).prerotate(rotate)
                    pix = page.get_pixmap(matrix=mat, alpha=False)
                    page_images.append(Image.frombytes("RGB", [pix.width, pix.height], pix.samples))

            # OCR each rendered page (PIL image, not a path) and collect text.
            all_text = []
            for page_image in tqdm(page_images, desc="to txt", ncols=100):
                all_text.append(self.process_image(page_image))

            logger.debug(f"PDF OCR result: {all_text[:50]}(...) total {len(all_text)} pages.")
            return "\n\n".join(all_text)

        except Exception as e:
            # Best-effort contract: callers receive "" rather than an exception.
            logger.error(f"PDF processing error: {str(e)}")
            return ""

    def process_file_mineru(self, file_path, params=None):
        """OCR a file via the remote MinerU service.

        :param file_path: path to the input file.
        :param params: unused, kept for interface symmetry.
        :return: extracted text.
        :raises OCRServiceException: when the service is unhealthy or processing fails.
        """
        import requests

        from .mineru import parse_doc

        mineru_ocr_uri = os.getenv("MINERU_OCR_URI", "http://localhost:30000")
        mineru_ocr_uri_health = f"{mineru_ocr_uri}/health"

        try:
            # Health check before submitting any work.
            health_check_response = requests.get(mineru_ocr_uri_health, timeout=5)
            if health_check_response.status_code != 200:
                error_detail = "Unknown error"
                try:
                    error_detail = health_check_response.json()
                except Exception:
                    error_detail = health_check_response.text

                raise OCRServiceException(
                    f"MinerU OCR服务健康检查失败: {error_detail}", "mineru_ocr", "health_check_failed"
                )

        except Exception as e:
            if isinstance(e, OCRServiceException):
                raise
            raise OCRServiceException(f"MinerU OCR服务检查失败: {str(e)}", "mineru_ocr", "service_error")

        try:
            start_time = time.time()
            file_path_list = [file_path]
            output_dir = os.path.join(os.getcwd(), "tmp", "mineru_ocr")

            # parse_doc returns one result per input file; we submit exactly one.
            text = parse_doc(file_path_list, output_dir, backend="vlm-sglang-client", server_url=mineru_ocr_uri)[0]

            processing_time = time.time() - start_time
            log_ocr_request("mineru_ocr", file_path, True, processing_time)

            logger.debug(f"Mineru OCR result: {text[:50]}(...) total {len(text)} characters.")
            return text

        except Exception as e:
            processing_time = time.time() - start_time
            error_msg = f"MinerU OCR处理失败: {str(e)}"
            log_ocr_request("mineru_ocr", file_path, False, processing_time, error_msg)

            raise OCRServiceException(error_msg, "mineru_ocr", "processing_failed")

    def process_file_paddlex(self, pdf_path, params=None):
        """OCR a PDF via the remote PaddleX service.

        :param pdf_path: path to the PDF file.
        :param params: unused, kept for interface symmetry.
        :return: extracted full text.
        :raises OCRServiceException: when the service is unhealthy or processing fails.
        """
        from .paddlex import analyze_document, check_paddlex_health

        paddlex_uri = os.getenv("PADDLEX_URI", "http://localhost:8080")

        try:
            # Health check before submitting any work.
            health_check_response = check_paddlex_health(paddlex_uri)
            if not health_check_response.ok:
                error_detail = "Unknown error"
                try:
                    error_detail = health_check_response.json()
                except Exception:
                    error_detail = health_check_response.text

                raise OCRServiceException(
                    f"PaddleX OCR服务健康检查失败: {error_detail}", "paddlex_ocr", "health_check_failed"
                )
        except Exception as e:
            if isinstance(e, OCRServiceException):
                raise
            raise OCRServiceException(f"PaddleX OCR服务检查失败: {str(e)}", "paddlex_ocr", "service_error")

        try:
            start_time = time.time()
            result = analyze_document(pdf_path, base_url=paddlex_uri)
            processing_time = time.time() - start_time

            if not result["success"]:
                error_msg = f"PaddleX OCR处理失败: {result['error']}"
                log_ocr_request("paddlex_ocr", pdf_path, False, processing_time, error_msg)

                raise OCRServiceException(error_msg, "paddlex_ocr", "processing_failed")

            log_ocr_request("paddlex_ocr", pdf_path, True, processing_time)
            return result["full_text"]

        except Exception as e:
            if isinstance(e, OCRServiceException):
                raise
            # Defensive: start_time is set on the first line of the try, but
            # keep the guard so a logging failure can never mask the real error.
            processing_time = time.time() - start_time if "start_time" in locals() else 0
            error_msg = f"PaddleX OCR处理失败: {str(e)}"
            log_ocr_request("paddlex_ocr", pdf_path, False, processing_time, error_msg)

            raise OCRServiceException(error_msg, "paddlex_ocr", "processing_failed")

    def _analyze_image_with_vlm(self, image_data_uri: str) -> str:
        """Analyze an image with a VLM and decide whether it has substantive content.

        :param image_data_uri: image as a base64 data URI.
        :return: a one-line description when the image has substantive content,
                 otherwise "" (also returned on any model/loading failure —
                 this helper is deliberately best-effort and never raises).
        """
        try:
            from src.models.chat import select_model

            # Vision model used for the analysis.
            provider = "dashscope"
            model_name = "qwen-vl-max-2025-04-02"

            # Load the VL model; on failure, skip analysis rather than abort.
            try:
                logger.debug(f"Selecting model from `{provider}` with `{model_name}`")
                vlm = select_model(provider, model_name)
            except Exception as e:
                logger.warning(f"加载 VL 模型失败 ({provider}/{model_name}): {str(e)}，跳过图片分析")
                return ""

            # Prompt asking for a 是/否 judgment plus a short description.
            prompt = """请分析这张图片的内容。如果图片包含以下任何一种实质性内容，请回答"是"并简要描述图片内容：
                    - 有意义的文字、表格、图表
                    - 重要的照片、插图、示意图
                    - 具有参考价值的视觉信息

                    如果图片只是装饰性元素、背景、分隔线、空白区域或无实质内容，请只回答"否"。

                    请按以下格式回答：
                    判断: [是/否]
                    描述: [如果是"是"，请用一句话描述图片主要内容；如果是"否"，留空]"""

            # Multimodal message in OpenAI content-list format.
            messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": prompt},
                        {"type": "image_url", "image_url": {"url": image_data_uri}}
                    ]
                }
            ]

            try:
                response = vlm.call(messages, stream=False)
                content = response.content.strip()

                # Parse the reply, expected as "判断: 是/否\n描述: ...".
                # Both half- and full-width colons are accepted.
                lines = content.split('\n')
                judgment = ""
                description = ""

                for line in lines:
                    if line.startswith("判断:") or line.startswith("判断："):
                        judgment = line.split(':', 1)[1].strip() if ':' in line else line.split('：', 1)[1].strip()
                    elif line.startswith("描述:") or line.startswith("描述："):
                        description = line.split(':', 1)[1].strip() if ':' in line else line.split('：', 1)[1].strip()

                # Treat an explicit "否" or an empty description as "no content".
                if "否" in judgment or not description:
                    logger.debug(f"VLM 判断：图片无实质性内容（判断: {judgment}）")
                    return ""

                logger.debug(f"VLM 分析结果 (判断: {judgment}): {description[:100]}...")
                return description

            except Exception as e:
                logger.warning(f"VLM 调用失败: {str(e)}")
                return ""

        except Exception as e:
            logger.warning(f"图片分析异常: {str(e)}")
            return ""

    def process_file_unstructured(self, file_path, params=None):
        """Process a file (PDF, image, ...) with Unstructured.

        High-level document parsing with table-structure detection and image
        extraction; embedded images are filtered via a VLM description pass.

        :param file_path: path to the input file.
        :param params: optional dict; ``save_metadata=True`` saves element
                       metadata/visualization JSON next to the file, and
                       ``include_base64_images=True`` embeds base64 images in
                       the output instead of descriptions only.
        :return: extracted text in Markdown format.
        :raises OCRServiceException: when the library is missing or processing fails.
        """
        try:
            from langchain_unstructured import UnstructuredLoader
        except ImportError:
            raise OCRServiceException(
                "Unstructured 库未安装。请运行: pip install unstructured[pdf] langchain-unstructured",
                "unstructured",
                "import_error"
            )

        start_time = time.time()
        save_metadata = params.get("save_metadata", False) if params else False

        try:
            # Extract text and structured content, keeping element metadata.
            loader = UnstructuredLoader(
                file_path=file_path,
                strategy="hi_res",              # high-resolution mode for complex documents
                infer_table_structure=True,     # parse table structure automatically
                ocr_languages="chi_sim+eng",    # Chinese + English OCR
                ocr_engine="paddleocr"          # use PaddleOCR as the OCR engine
            )

            documents = list(loader.load())

            # Optionally persist element metadata for later visualization.
            if save_metadata:
                metadata_path = file_path + ".unstructured_metadata.json"
                import json
                metadata_list = []
                for doc in documents:
                    metadata_list.append({
                        "content": doc.page_content,
                        "metadata": doc.metadata
                    })
                with open(metadata_path, "w", encoding="utf-8") as f:
                    json.dump(metadata_list, f, ensure_ascii=False, indent=2)
                logger.info(f"元数据已保存到: {metadata_path}")

            # Extract embedded images per page, encode as base64 data URIs, and
            # keep only those the VLM judges to have substantive content.
            import base64

            image_map = {}  # page_num -> list of {"data_uri": str, "description": str}
            try:
                doc = fitz.open(file_path)
                for page_num, page in enumerate(doc, start=1):
                    image_map[page_num] = []
                    for img_index, img in enumerate(page.get_images(full=True), start=1):
                        try:
                            xref = img[0]
                            pix = fitz.Pixmap(doc, xref)

                            # Convert CMYK (or other >=5 channel) pixmaps to RGB.
                            if pix.n >= 5:  # CMYK
                                pix = fitz.Pixmap(fitz.csRGB, pix)

                            img_bytes = pix.tobytes("png")
                            img_base64 = base64.b64encode(img_bytes).decode('utf-8')
                            data_uri = f"data:image/png;base64,{img_base64}"

                            # Best-effort VLM analysis ("" means skip the image).
                            description = self._analyze_image_with_vlm(data_uri)

                            if description:
                                image_map[page_num].append({
                                    "data_uri": data_uri,
                                    "description": description
                                })
                                logger.debug(f"图片分析成功 (page {page_num}, img {img_index}): {description[:50]}...")
                            else:
                                logger.debug(f"图片无实质性内容，跳过 (page {page_num}, img {img_index})")

                        except Exception as e:
                            logger.warning(f"处理图片失败 (page {page_num}, img {img_index}): {str(e)}")
                            continue

                doc.close()
            except Exception as e:
                logger.warning(f"图片提取失败: {str(e)}")

            # Convert elements to Markdown.
            md_lines = []
            inserted_images = set()

            # Knowledge-base default: keep only image descriptions, not base64
            # payloads, so chunks stay small.
            include_base64 = params.get("include_base64_images", False) if params else False
            if not include_base64:
                logger.info("知识库模式：将使用图片描述替代 base64 图片，避免 chunk 过大")

            for doc in documents:
                metadata = doc.metadata
                text = doc.page_content
                cat = metadata.get("category", "Text")
                page_num = metadata.get("page_number")

                if cat == "Title" and text.strip().startswith("- "):
                    # Already list-formatted; keep as-is.
                    md_lines.append(text)
                elif cat == "Title":
                    md_lines.append(f"# {text}")
                elif cat in ["Header", "Subheader"]:
                    md_lines.append(f"## {text}")
                elif cat == "Table":
                    # Prefer the HTML table rendering converted to Markdown.
                    text_as_html = metadata.get("text_as_html")
                    if text_as_html:
                        try:
                            from html2text import html2text
                            table_md = html2text(text_as_html)
                            md_lines.append(table_md)
                        except ImportError:
                            md_lines.append(text)
                    else:
                        md_lines.append(text)
                elif cat == "Image":
                    # Insert at most one not-yet-used image from this page:
                    # description always, base64 payload only when requested.
                    if page_num and page_num in image_map:
                        for img_info in image_map[page_num]:
                            img_data_uri = img_info["data_uri"]
                            img_description = img_info["description"]
                            if img_data_uri not in inserted_images:
                                md_lines.append(f"**图片描述:** {img_description}")
                                if include_base64:
                                    md_lines.append(f"![Image]({img_data_uri})")
                                inserted_images.add(img_data_uri)
                                break
                else:
                    md_lines.append(text)

            result_text = "\n\n".join(md_lines)
            processing_time = time.time() - start_time

            # For PDFs, also emit annotated-page visualization data.
            # `json` is in scope here: it was imported in the save_metadata branch above.
            if save_metadata and file_path.lower().endswith('.pdf'):
                try:
                    visualization_data = self._generate_visualization(file_path, documents)
                    visualization_path = file_path + ".visualization.json"
                    with open(visualization_path, "w", encoding="utf-8") as f:
                        json.dump(visualization_data, f, ensure_ascii=False, indent=2)
                    logger.info(f"可视化数据已保存到: {visualization_path}")
                except Exception as e:
                    logger.warning(f"生成可视化数据失败: {str(e)}")

            log_ocr_request("unstructured", file_path, True, processing_time)
            logger.debug(f"Unstructured 处理完成: {result_text[:100]}... (共 {len(result_text)} 字符)")

            return result_text

        except Exception as e:
            processing_time = time.time() - start_time
            error_msg = f"Unstructured 处理失败: {str(e)}"
            log_ocr_request("unstructured", file_path, False, processing_time, error_msg)
            raise OCRServiceException(error_msg, "unstructured", "processing_failed")

    def _generate_visualization(self, file_path, documents):
        """Generate annotated-page visualization data for a PDF.

        :param file_path: path to the PDF file.
        :param documents: document list produced by UnstructuredLoader.
        :return: dict with base64-encoded annotated page images and element metadata.
        """
        import base64
        import io
        import matplotlib.patches as patches
        import matplotlib.pyplot as plt
        from PIL import Image as PILImage

        # Flatten element metadata, attaching the text and a category default.
        metadata = []
        for doc in documents:
            meta = doc.metadata.copy()
            meta["text"] = doc.page_content
            meta["category"] = meta.get("category", "Text")
            metadata.append(meta)

        pdf_doc = fitz.open(file_path)
        total_pages = pdf_doc.page_count

        annotated_pages = []
        # Colors for non-default categories; everything else is deepskyblue.
        category_to_color = {
            "Title": "orchid",
            "Image": "forestgreen",
            "Table": "tomato",
        }

        for page_num in range(1, total_pages + 1):
            # Elements detected on this page.
            page_elements = [el for el in metadata if el.get("page_number") == page_num]

            # Render the page at 2x resolution.
            pdf_page = pdf_doc.load_page(page_num - 1)
            pix = pdf_page.get_pixmap(matrix=fitz.Matrix(2, 2))

            if not page_elements:
                # No elements: emit the raw page image without annotation.
                img_bytes = pix.tobytes("png")
                img_base64 = base64.b64encode(img_bytes).decode('utf-8')
                annotated_pages.append({
                    "page_number": page_num,
                    "image": img_base64,
                    "elements": []
                })
                continue

            pil_image = PILImage.frombytes("RGB", [pix.width, pix.height], pix.samples)

            # Draw bounding polygons over the page with matplotlib.
            fig, ax = plt.subplots(1, figsize=(pix.width/100, pix.height/100), dpi=100)
            ax.imshow(pil_image)

            categories = set()

            for element in page_elements:
                if "coordinates" not in element:
                    continue

                coords = element["coordinates"]
                if "points" not in coords:
                    continue

                points = coords["points"]
                layout_width = coords.get("layout_width", pix.width)
                layout_height = coords.get("layout_height", pix.height)

                # Scale layout coordinates to rendered-pixmap coordinates.
                scaled_points = [
                    (x * pix.width / layout_width, y * pix.height / layout_height)
                    for x, y in points
                ]

                category = element.get("category", "Text")
                box_color = category_to_color.get(category, "deepskyblue")
                categories.add(category)

                rect = patches.Polygon(
                    scaled_points,
                    linewidth=2,
                    edgecolor=box_color,
                    facecolor="none"
                )
                ax.add_patch(rect)

            # Legend: Text always shown, other categories only when present.
            legend_handles = [patches.Patch(color="deepskyblue", label="Text")]
            for category in ["Title", "Image", "Table"]:
                if category in categories:
                    legend_handles.append(
                        patches.Patch(color=category_to_color[category], label=category)
                    )
            ax.axis("off")
            ax.legend(handles=legend_handles, loc="upper right", fontsize=8)
            plt.tight_layout(pad=0)

            # Serialize the annotated figure to base64 PNG and release it.
            buf = io.BytesIO()
            plt.savefig(buf, format='png', bbox_inches='tight', pad_inches=0, dpi=100)
            plt.close(fig)
            buf.seek(0)
            img_base64 = base64.b64encode(buf.read()).decode('utf-8')

            annotated_pages.append({
                "page_number": page_num,
                "image": img_base64,
                "elements": page_elements
            })

        pdf_doc.close()

        return {
            "filename": os.path.basename(file_path),
            "total_pages": total_pages,
            "annotated_pages": annotated_pages,
            "created_at": time.strftime("%Y-%m-%d %H:%M:%S")
        }


def get_state(task_id):
    """Return the global state recorded for *task_id*, or {} when unknown."""
    return GOLBAL_STATE.get(task_id, {})


def plainreader(file_path):
    """Read a plain-text file and return its contents.

    :param file_path: path to the text file (decoded as UTF-8).
    :return: full file contents as a string.
    :raises FileNotFoundError: if the file does not exist.
    """
    # BUGFIX: validation used `assert`, which is stripped under `python -O`.
    if not os.path.exists(file_path):
        raise FileNotFoundError(f"File not found: {file_path}")

    # Explicit encoding: the previous code relied on the platform default.
    with open(file_path, encoding="utf-8") as f:
        return f.read()


if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument("--pdf-path", type=str, required=True, help="Path to the PDF file")
    parser.add_argument("--return-text", action="store_true", help="Return the extracted text")
    args = parser.parse_args()

    ocr = OCRPlugin()
    text = ocr.process_pdf(args.pdf_path)
    print(text)
