# server.py
import io
import os
import shutil
import tempfile

from flask import Flask, request, send_file, jsonify, after_this_request

from mineru.data.data_reader_writer import FileBasedDataReader

app = Flask(__name__)

# Custom temporary-directory path for MinerU scratch files.
# NOTE(review): neither this constant nor ensure_clean_temp_dir() appears to be
# used anywhere in this file — candidates for removal; confirm no external user.
CUSTOM_TEMP_DIR = os.path.join(tempfile.gettempdir(), "sheyue/mineru/temp")


def ensure_clean_temp_dir():
    """Recreate ``CUSTOM_TEMP_DIR`` so it exists and is completely empty."""
    # Wipe any leftovers from a previous run before recreating the directory.
    if os.path.exists(CUSTOM_TEMP_DIR):
        shutil.rmtree(CUSTOM_TEMP_DIR)
    os.makedirs(CUSTOM_TEMP_DIR, exist_ok=True)


@app.route('/mineru/test', methods=['GET'])
def test():
    """Liveness probe: always answers with the literal string 'info'."""
    return 'info'


@app.route('/mineru/parse', methods=['POST'])
def parse_pdf():
    """Parse an uploaded PDF and return the generated artifacts as a ZIP.

    Delegates all work to ``chuli()``; this boundary handler only converts
    unexpected failures into a generic JSON 500 so internals never leak to
    the client.
    """
    try:
        return chuli()
    except Exception:
        # Log the full traceback (print() only showed the message, which made
        # pipeline failures nearly impossible to diagnose from server logs).
        app.logger.exception("Error during PDF parsing request")
        return jsonify({"error": "An error occurred during processing"}), 500



def chuli():
    """Handle one PDF-parsing request end to end.

    Saves the uploaded 'pdf' file into a throwaway temp directory, runs the
    MinerU pipeline on it, bundles the generated artifacts (markdown, debug
    PDFs, JSON files, extracted images) into a ZIP, and returns that ZIP as a
    file download.

    Returns:
        Flask response: the ZIP attachment on success, or a JSON error with
        HTTP 400 when no usable file was uploaded.
    """
    file = request.files.get('pdf')
    if not file or not file.filename:
        return jsonify({"error": "No file provided"}), 400

    # Strip any directory components from the client-supplied filename so a
    # crafted name like "../../evil.pdf" cannot escape the temp directory.
    safe_name = os.path.basename(file.filename)
    name_without_suff = os.path.splitext(safe_name)[0]

    with tempfile.TemporaryDirectory() as temp_dir:
        pdf_path = os.path.join(temp_dir, safe_name)
        file.save(pdf_path)

        local_image_dir = os.path.join(temp_dir, "images")
        os.makedirs(local_image_dir, exist_ok=True)

        pdf_bytes = FileBasedDataReader("").read(pdf_path)
        do_parse([name_without_suff], [pdf_bytes], temp_dir)

        # Collect the known pipeline outputs into a clean directory for zipping;
        # missing artifacts are simply skipped.
        output_dir = os.path.join(temp_dir, "output_files")
        os.makedirs(output_dir, exist_ok=True)
        for suffix in (".md", "_layout.pdf", "_spans.pdf", "_content_list.json",
                       "_middle.json", "_model.json", "_origin.pdf"):
            src = os.path.join(temp_dir, f"{name_without_suff}{suffix}")
            if os.path.exists(src):
                shutil.copy(src, output_dir)

        if os.path.exists(local_image_dir):
            shutil.copytree(local_image_dir, os.path.join(output_dir, "images"))

        zip_base = os.path.join(temp_dir, 'output')
        shutil.make_archive(zip_base, 'zip', output_dir)

        # Load the archive into memory BEFORE the TemporaryDirectory context
        # exits: send_file() streams the response after this function returns,
        # by which time the on-disk ZIP would already have been deleted.
        with open(zip_base + ".zip", "rb") as fh:
            zip_buffer = io.BytesIO(fh.read())

    return send_file(
        zip_buffer,
        mimetype='application/zip',
        as_attachment=True,
        download_name=f"{name_without_suff}_result.zip"
    )


import copy
import json
import os

from loguru import logger

from mineru.cli.common import convert_pdf_bytes_to_bytes_by_pypdfium2, prepare_env
from mineru.data.data_reader_writer import FileBasedDataWriter
from mineru.utils.draw_bbox import draw_layout_bbox, draw_span_bbox
from mineru.utils.enum_class import MakeMode
from mineru.backend.pipeline.pipeline_analyze import doc_analyze as pipeline_doc_analyze
from mineru.backend.pipeline.pipeline_middle_json_mkcontent import union_make as pipeline_union_make
from mineru.backend.pipeline.model_json_to_middle_json import result_to_middle_json as pipeline_result_to_middle_json


def do_parse(pdf_file_names, pdf_bytes_list, temp_dir):
    """Run the MinerU pipeline on each PDF and write all artifacts into temp_dir.

    For every document this produces, in ``temp_dir``:
    ``<name>_layout.pdf``, ``<name>_spans.pdf``, ``<name>_origin.pdf``,
    ``<name>.md``, ``<name>_content_list.json``, ``<name>_middle.json``,
    and ``<name>_model.json``.

    Args:
        pdf_file_names: base names (no extension), parallel to pdf_bytes_list.
        pdf_bytes_list: raw PDF bytes per document. NOTE: mutated in place —
            each entry is replaced below by its pypdfium2-normalized bytes.
        temp_dir: directory that receives every output file.
    """
    # Normalize every PDF through pypdfium2 before analysis (args 0/None
    # presumably mean "all pages" — TODO confirm against the mineru helper).
    for idx, pdf_bytes in enumerate(pdf_bytes_list):
        new_pdf_bytes = convert_pdf_bytes_to_bytes_by_pypdfium2(pdf_bytes, 0, None)
        pdf_bytes_list[idx] = new_pdf_bytes

    # Batch analysis over all documents; the five returned lists are parallel,
    # one entry per input PDF. ["ch", 'en'] look like OCR language hints —
    # NOTE(review): confirm against pipeline_doc_analyze's signature.
    infer_results, all_image_lists, all_pdf_docs, lang_list, ocr_enabled_list = pipeline_doc_analyze(pdf_bytes_list,
                                                                                                     ["ch", 'en'],
                                                                                                     parse_method='auto',
                                                                                                     formula_enable=True,
                                                                                                     table_enable=True)

    for idx, model_list in enumerate(infer_results):
        # Deep-copy before the pipeline consumes model_list, so the pristine
        # model output can still be dumped to *_model.json at the end.
        model_json = copy.deepcopy(model_list)
        pdf_file_name = pdf_file_names[idx]
        # prepare_env creates per-document image/markdown dirs under temp_dir.
        local_image_dir, local_md_dir = prepare_env(temp_dir, pdf_file_name, "auto")
        image_writer, md_writer = FileBasedDataWriter(local_image_dir), FileBasedDataWriter(local_md_dir)

        images_list = all_image_lists[idx]
        pdf_doc = all_pdf_docs[idx]
        _lang = lang_list[idx]
        _ocr_enable = ocr_enabled_list[idx]
        # Convert the raw model output into MinerU's "middle JSON" form;
        # image_writer persists extracted images as a side effect.
        middle_json = pipeline_result_to_middle_json(model_list, images_list, pdf_doc, image_writer, _lang, _ocr_enable,
                                                     True)

        pdf_info = middle_json["pdf_info"]

        # Debug renderings: detected layout boxes and text spans drawn over the
        # normalized PDF, written directly into temp_dir.
        pdf_bytes = pdf_bytes_list[idx]
        draw_layout_bbox(pdf_info, pdf_bytes, temp_dir, f"{pdf_file_name}_layout.pdf")

        draw_span_bbox(pdf_info, pdf_bytes, temp_dir, f"{pdf_file_name}_spans.pdf")

        # Persist the normalized source PDF alongside the derived artifacts.
        md_writer.write(os.path.join(temp_dir, f"{pdf_file_name}_origin.pdf"),
                        pdf_bytes,
                        )

        # Markdown rendering; image links are relative to the images dir name.
        image_dir = str(os.path.basename(local_image_dir))
        md_content_str = pipeline_union_make(pdf_info, MakeMode.MM_MD, image_dir)
        md_writer.write_string(
            os.path.join(temp_dir, f"{pdf_file_name}.md"),
            md_content_str,
        )

        # Structured per-block content list (same image-dir convention).
        image_dir = str(os.path.basename(local_image_dir))
        content_list = pipeline_union_make(pdf_info, MakeMode.CONTENT_LIST, image_dir)
        md_writer.write_string(
            os.path.join(temp_dir, f"{pdf_file_name}_content_list.json"),
            json.dumps(content_list, ensure_ascii=False, indent=4),
        )

        md_writer.write_string(
            os.path.join(temp_dir, f"{pdf_file_name}_middle.json"),
            json.dumps(middle_json, ensure_ascii=False, indent=4),
        )

        # The untouched copy of the model output, taken before processing.
        md_writer.write_string(
            os.path.join(temp_dir, f"{pdf_file_name}_model.json"),
            json.dumps(model_json, ensure_ascii=False, indent=4),
        )

        logger.info(f"local output dir is {local_md_dir}")


if __name__ == '__main__':
    # Presumably tells MinerU to load models from local storage instead of
    # downloading them — TODO confirm; note this is only set when run as a
    # script, not under a WSGI server like gunicorn.
    os.environ['MINERU_MODEL_SOURCE'] = "local"
    # Flask's built-in dev server, bound on all interfaces; use a production
    # WSGI server for real deployments.
    app.run(host='0.0.0.0', port=5000)
