# -*- coding: utf-8 -*-
# @File:     parse_d.py
# @Author:
# @DateTime: 2025/10/22/9:33
import sys
from pathlib import Path
current_path = Path(__file__).resolve().parent
sys.path.append(str(current_path))

import json
from os import PathLike

from typing import Any, Callable, Dict, Iterator, Optional, Union
import tempfile
import os
from langchain_community.document_loaders import UnstructuredWordDocumentLoader
import numpy as np
from paddleocr import PaddleOCR
from pdf2image import convert_from_path
# from pptx import Presentation
from PIL import Image

# 将模型放在同级目录paddlex文件夹下

class OCRPlugin:
    """
    OCR plugin wrapping PaddleOCR with locally bundled models.

    Model files are expected under ``paddlex/official_models`` next to this
    file (see the module-level note above).
    """

    def __init__(self):
        # Populated by init_model(); None only before initialization.
        self.ocr: Optional[PaddleOCR] = None
        self.init_model()

    def init_model(self):
        """Instantiate PaddleOCR pointing at the bundled model directories."""
        models = current_path / 'paddlex' / 'official_models'
        self.ocr = PaddleOCR(
            use_textline_orientation=True,
            doc_orientation_classify_model_dir=str(models / 'PP-LCNet_x1_0_doc_ori'),
            doc_unwarping_model_dir=str(models / 'UVDoc'),
            text_detection_model_dir=str(models / 'PP-OCRv5_server_det'),
            textline_orientation_model_dir=str(models / 'PP-LCNet_x1_0_textline_ori'),
            text_recognition_model_dir=str(models / 'PP-OCRv5_server_rec'),
        )

    def process_image(self, image: Union[str, np.ndarray]):
        """
        Run OCR on a single image.

        :param image: image path or image data as a numpy array
                      (presumably H x W x C uint8 — TODO confirm against
                      PaddleOCR's accepted inputs)
        :return: the ``rec_texts`` list (recognized text lines) of the first
                 prediction result
        """
        # np.ndarray is the correct type here; the original annotated with
        # np.array, which is a factory function, not a type.
        result = self.ocr.predict(image)[0]
        return result['rec_texts']


def process_pdf(pdf_path):
    """
    OCR every page of a PDF and return the concatenated text.

    :param pdf_path: path to the PDF file
    :return: recognized text, one OCR line per row, pages separated by a
             newline
    """
    ocr = OCRPlugin()
    # Rasterize each PDF page to a PIL image (pdf2image requires poppler).
    images = convert_from_path(pdf_path)
    page_texts = []
    for img in images:
        rec_texts = ocr.process_image(np.array(img))
        page_texts.append("\n".join(rec_texts))
    # Join pages with a newline: the original `text += "\n".join(...)` glued
    # the last line of one page directly onto the first line of the next.
    return "\n".join(page_texts)


def process_image(img_path):
    """
    OCR a single image file.

    :param img_path: path to the image file
    :return: recognized text, one OCR line per row
    """
    plugin = OCRPlugin()
    # PIL image -> numpy array, the input form the OCR plugin accepts.
    pixels = np.array(Image.open(img_path))
    return "\n".join(plugin.process_image(pixels))


# def process_ppt(ppt_path):
#     """
#     处理PPT
#     :param ppt_path:
#     :return:
#     """
#     prs = Presentation(ppt_path)
#     text_li = []
#     for slide in prs.slides:
#         for shape in slide.shapes:
#             if hasattr(shape, "text"):
#                 text_li.append(shape.text)
#     text = "\n".join(text_li)
#     return text


def _extract_word_text(file_path: Path) -> str:
    """
    Parse a Word document (.doc/.docx) into plain text.

    python-docx is attempted first (it handles .docx); when it fails or
    yields no text, the unstructured loader is tried so legacy .doc files
    can still be parsed.

    :param file_path: path to the Word document
    :return: extracted text, stripped of surrounding whitespace
    :raises ValueError: when neither backend can parse the document
    """
    try:
        from docx import Document  # type: ignore

        paragraphs = Document(file_path).paragraphs  # type: ignore
        joined = "\n".join(p.text for p in paragraphs).strip()
        if joined:
            return joined
    except Exception as docx_error:  # noqa: BLE001
        # Best-effort: report the docx failure and fall through.
        print(docx_error)

    try:
        docs = UnstructuredWordDocumentLoader(str(file_path)).load()
        return "\n".join(d.page_content for d in docs).strip()
    except Exception as unstructured_error:  # noqa: BLE001
        raise ValueError(f"无法解析 Word 文档: {file_path.name}") from unstructured_error


def del_temp_file(file_path):
    """
    Best-effort removal of a temporary file.

    Filesystem errors (file already gone, permission issues) are ignored on
    purpose; programming errors (e.g. a bad argument type) still surface —
    the original bare ``except:`` swallowed those too.

    :param file_path: path of the file to delete
    """
    try:
        os.remove(file_path)
    except OSError:
        # Cleanup is best-effort; the file may already be deleted.
        pass


def process_file_to_markdown(file_bytes: bytes, file_org_path: str, **kwargs) -> str:
    """
    Convert a file's raw bytes to a markdown string.

    The bytes are spilled to a named temporary file (several parsing
    backends only accept paths), parsed according to the original file's
    extension, and the temporary file is always removed afterwards.

    :param file_bytes: raw content of the file
    :param file_org_path: original path/name; its suffix selects the parser
    :param kwargs: unused, kept for interface compatibility
    :return: markdown text, starting with ``# <original file name>``
    :raises ValueError: for unsupported file extensions
    """
    file_type = Path(file_org_path).suffix.lower()
    file_org_name = Path(file_org_path).name

    # NamedTemporaryFile, not TemporaryFile: on POSIX, TemporaryFile accepts
    # no `delete` kwarg (TypeError) and has no usable .name. Keep the
    # original suffix so extension-sniffing backends behave.
    with tempfile.NamedTemporaryFile(mode='wb', suffix=file_type, delete=False) as tmp_file:
        tmp_file.write(file_bytes)
        file_name = tmp_file.name
    file_path_obj = Path(file_name)

    try:
        if file_type in [".txt", ".md"]:
            # Plain text / markdown: content verbatim under a title.
            with open(file_name, encoding="utf-8") as f:
                content = f.read()
            return f"# {file_org_name}\n\n{content}"
        elif file_type in [".doc", ".docx"]:
            # Word documents.
            text = _extract_word_text(file_path_obj)
            return f"# {file_org_name}\n\n{text}"
        elif file_type in [".html", ".htm"]:
            # HTML -> markdown via markdownify (ATX-style headings).
            from markdownify import markdownify as md
            with open(file_path_obj, encoding="utf-8") as f:
                content = f.read()
            text = md(content, heading_style="ATX")
            return f"# {file_org_name}\n\n{text}"
        elif file_type == ".csv":
            import pandas as pd

            df = pd.read_csv(file_path_obj)
            # One header+row table per record keeps each row self-describing.
            markdown_content = f"# {file_org_name}\n\n"
            for _, row in df.iterrows():
                row_df = pd.DataFrame([row], columns=df.columns)
                markdown_content += f"{row_df.to_markdown(index=False)}\n\n"
            return markdown_content.strip()
        elif file_type in [".xls", ".xlsx"]:
            import pandas as pd

            # Every worksheet becomes its own "## <sheet>" section with one
            # big table.
            excel_file = pd.ExcelFile(file_path_obj)
            markdown_content = f"# {file_org_name}\n\n"
            for sheet_name in excel_file.sheet_names:
                df = pd.read_excel(file_path_obj, sheet_name=sheet_name)
                markdown_content += f"## {sheet_name}\n\n"
                markdown_content += f"{df.to_markdown(index=False)}\n\n"
            return markdown_content.strip()
        elif file_type == ".json":
            # Re-serialize pretty-printed inside a fenced code block.
            with open(file_path_obj, encoding="utf-8") as f:
                data = json.load(f)
            json_str = json.dumps(data, ensure_ascii=False, indent=4)
            return f"# {file_org_name}\n\n```json\n{json_str}\n```"
        elif file_type in [".pdf"]:
            # PDF via page rasterization + OCR.
            text = process_pdf(file_path_obj)
            return f"# {file_org_name}\n\n{text}"
        elif file_type in [".jpg", ".jpeg", ".png", ".bmp", ".tiff", ".tif"]:
            # Single images via OCR.
            text = process_image(file_path_obj)
            return f"# {file_org_name}\n\n{text}"
        else:
            raise ValueError(f"Unsupported file type: {file_type}")
    finally:
        # Always remove the temp file — the original leaked it whenever a
        # parser raised or the extension was unsupported.
        try:
            os.remove(file_name)
        except OSError:
            pass


if __name__ == '__main__':
    # Manual smoke test: parse a local file and print the markdown.
    # path = r'G:\work\debug\ll-gg\test_doc\宪法.md'
    path = r'C:\Users\mojia\Desktop\jetbra\README.pdf'

    # Context manager closes the handle deterministically — the original
    # `open(path, 'rb').read()` leaked an open file object.
    with open(path, 'rb') as f:
        data_byte = f.read()

    ret = process_file_to_markdown(data_byte, path)
    print(ret)







