# NOTE: the three lines "Spaces:" / "Runtime error" / "Runtime error" were
# Hugging Face Spaces status-page residue copied along with the source;
# they are not part of the program and are preserved here only as a comment.
# --- Imports -----------------------------------------------------------------
# NOTE(review): pdfminer, transformers, torch, PIL and numpy are imported but
# never referenced anywhere in this file — confirm before removing them.
from pdfminer.high_level import extract_text
from pdfminer.layout import LAParams
import fitz  # PyMuPDF, classic import alias
import pymupdf  # NOTE(review): same library as `fitz` — a single alias would suffice
from transformers import LayoutLMv3Processor, LayoutLMv3ForSequenceClassification
import torch
from PIL import Image
import numpy as np
import logging
from fastapi.logger import logger as fastapi_logger

# Copyright (c) Opendatalab. All rights reserved.
import base64
import json
import os
import time
import zipfile
from pathlib import Path
import re
import uuid
from io import BytesIO
from fastapi import FastAPI, File, UploadFile
from fastapi.responses import JSONResponse
import uvicorn
import traceback
from datetime import datetime
# FastAPI application instance.
app = FastAPI()

# --- One-time environment setup (runs at import time) ------------------------
# Reinstall magic-pdf from the MinerU dev branch and fetch the model weights.
for _cmd in (
    'pip uninstall -y magic-pdf',
    'pip install git+https://github.com/opendatalab/MinerU.git@dev',
    'wget https://github.com/opendatalab/MinerU/raw/dev/scripts/download_models_hf.py -O download_models_hf.py',
    'python download_models_hf.py',
):
    os.system(_cmd)

# Point magic-pdf at CUDA and optionally enable LLM-aided titles when an
# API key is supplied through the `apikey` environment variable.
with open('/home/user/magic-pdf.json', 'r') as file:
    data = json.load(file)

data['device-mode'] = "cuda"
if os.getenv('apikey'):
    data['llm-aided-config']['title_aided']['api_key'] = os.getenv('apikey')
    data['llm-aided-config']['title_aided']['enable'] = True

with open('/home/user/magic-pdf.json', 'w') as file:
    json.dump(data, file, indent=4)

# Ship the bundled PaddleOCR models into the expected home location.
os.system('cp -r paddleocr /home/user/.paddleocr')
# Imported only after the os.system() installation above has put magic-pdf
# in place — do not move these to the top of the file.
from magic_pdf.data.data_reader_writer import FileBasedDataReader
from magic_pdf.libs.hash_utils import compute_sha256
from magic_pdf.tools.common import do_parse, prepare_env
from loguru import logger  # NOTE(review): immediately shadowed by the uvicorn logger below

# Configure logging: every `logger.*` call below goes to the uvicorn logger.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("uvicorn")
def read_fn(path):
    """Read the file at *path* through magic-pdf's FileBasedDataReader.

    The reader is rooted at the file's directory, so only the basename is
    passed to ``read``. Returns the file's raw bytes.
    """
    # Bug fix: this function was defined twice verbatim back-to-back; the
    # redundant duplicate definition has been removed.
    disk_rw = FileBasedDataReader(os.path.dirname(path))
    return disk_rw.read(os.path.basename(path))
def parse_pdf(doc_path, output_dir, end_page_id, is_ocr, layout_mode, formula_enable, table_enable, language):
    """Parse *doc_path* with magic-pdf's ``do_parse`` pipeline.

    :param doc_path: path of the (already-PDF) input document
    :param output_dir: root directory for generated artifacts
    :param end_page_id: last page index (inclusive) to process
    :param is_ocr: force the "ocr" parse method instead of "auto"
    :returns: tuple ``(local_md_dir, file_name)`` locating the markdown output
    :raises Exception: re-raised after logging when parsing fails
    """
    os.makedirs(output_dir, exist_ok=True)
    try:
        # Unique name per run so repeated uploads never collide.
        file_name = f"{str(Path(doc_path).stem)}_{time.time()}"
        pdf_data = read_fn(doc_path)
        parse_method = "ocr" if is_ocr else "auto"
        local_image_dir, local_md_dir = prepare_env(output_dir, file_name, parse_method)
        do_parse(
            output_dir,
            file_name,
            pdf_data,
            [],
            parse_method,
            False,
            end_page_id=end_page_id,
            layout_model=layout_mode,
            formula_enable=formula_enable,
            table_enable=table_enable,
            lang=language,
            f_dump_orig_pdf=False,
        )
        return local_md_dir, file_name
    except Exception as e:
        logger.exception(e)
        # Bug fix: the exception used to be swallowed here, making the
        # function implicitly return None and causing a TypeError at the
        # caller's tuple unpack. Re-raise so failures surface properly.
        raise
def compress_directory_to_zip(directory_path, output_zip_path):
    """Zip the whole tree under *directory_path* into *output_zip_path*.

    Entries are stored with paths relative to *directory_path*.
    Returns 0 on success, -1 on any failure (the error is logged).
    """
    try:
        with zipfile.ZipFile(output_zip_path, 'w', zipfile.ZIP_DEFLATED) as archive:
            for root, _dirs, names in os.walk(directory_path):
                for name in names:
                    absolute = os.path.join(root, name)
                    # Archive member names are relative to the zipped root.
                    archive.write(absolute, os.path.relpath(absolute, directory_path))
        return 0
    except Exception as e:
        logger.exception(e)
        return -1
def image_to_base64(image_path):
    """Return the bytes of the file at *image_path* as a base64 ASCII string."""
    with open(image_path, "rb") as fh:
        raw = fh.read()
    return base64.b64encode(raw).decode('utf-8')
def replace_image_with_base64(markdown_text, image_dir_path):
    """Inline every Markdown image reference as a base64 data URI.

    Each ``![alt](relative/path)`` occurrence is resolved against
    *image_dir_path*, read from disk via ``image_to_base64`` and rewritten
    as a ``data:image/jpeg;base64,...`` link whose alt text is the original
    relative path.
    """
    pattern = r'\!\[(?:[^\]]*)\]\(([^)]+)\)'

    def _inline(match):
        rel = match.group(1)
        encoded = image_to_base64(os.path.join(image_dir_path, rel))
        return f"![{rel}](data:image/jpeg;base64,{encoded})"

    return re.sub(pattern, _inline, markdown_text)
def to_markdown(file_path, end_pages, is_ocr, layout_mode, formula_enable, table_enable, language):
    """Convert a document into Markdown via the magic-pdf pipeline.

    Returns a 4-tuple: (markdown with images inlined as base64, raw markdown
    text, path to a zip archive of the output directory, path to the
    layout-annotated PDF).
    """
    file_path = to_pdf(file_path)   # non-PDF inputs are converted first
    end_pages = min(end_pages, 20)  # hard cap at 20 pages
    # Run the parse and locate the generated markdown directory.
    local_md_dir, file_name = parse_pdf(file_path, './output', end_pages - 1, is_ocr,
                                        layout_mode, formula_enable, table_enable, language)
    archive_zip_path = os.path.join("./output", compute_sha256(local_md_dir) + ".zip")
    if compress_directory_to_zip(local_md_dir, archive_zip_path) == 0:
        logger.info("压缩成功")
    else:
        logger.error("压缩失败")
    with open(os.path.join(local_md_dir, file_name + ".md"), 'r', encoding='utf-8') as f:
        txt_content = f.read()
    md_content = replace_image_with_base64(txt_content, local_md_dir)
    # Path of the layout-annotated PDF produced alongside the markdown.
    new_pdf_path = os.path.join(local_md_dir, file_name + "_layout.pdf")
    return md_content, txt_content, archive_zip_path, new_pdf_path
# Delimiter configuration for LaTeX rendering: $$...$$ display, $...$ inline.
latex_delimiters = [
    {"left": "$$", "right": "$$", "display": True},
    {"left": "$", "right": "$", "display": False},
]
def init_model():
    """Warm up magic-pdf's text and OCR models through the ModelSingleton.

    Returns 0 when both models initialise successfully, -1 on any failure
    (the exception is logged).
    """
    from magic_pdf.model.doc_analyze_by_custom_model import ModelSingleton
    try:
        manager = ModelSingleton()
        manager.get_model(False, False)  # text-mode model
        logger.info(f"txt_model init final")
        manager.get_model(True, False)   # OCR-mode model
        logger.info(f"ocr_model init final")
        return 0
    except Exception as e:
        logger.exception(e)
        return -1
# Eagerly initialise the models at import time and load the HTML header
# fragment used by the UI.
model_init = init_model()
logger.info(f"model_init: {model_init}")

with open("header.html", "r") as file:
    header = file.read()
# OCR language codes grouped by script family.
latin_lang = [
    'af', 'az', 'bs', 'cs', 'cy', 'da', 'de', 'es', 'et', 'fr', 'ga', 'hr',
    'hu', 'id', 'is', 'it', 'ku', 'la', 'lt', 'lv', 'mi', 'ms', 'mt', 'nl',
    'no', 'oc', 'pi', 'pl', 'pt', 'ro', 'rs_latin', 'sk', 'sl', 'sq', 'sv',
    'sw', 'tl', 'tr', 'uz', 'vi', 'french', 'german',
]
arabic_lang = ['ar', 'fa', 'ug', 'ur']
cyrillic_lang = [
    'ru', 'rs_cyrillic', 'be', 'bg', 'uk', 'mn', 'abq', 'ady', 'kbd', 'ava',
    'dar', 'inh', 'che', 'lbe', 'lez', 'tab',
]
devanagari_lang = [
    'hi', 'mr', 'ne', 'bh', 'mai', 'ang', 'bho', 'mah', 'sck', 'new', 'gom',
    'sa', 'bgc',
]
other_lang = ['ch', 'en', 'korean', 'japan', 'chinese_cht', 'ta', 'te', 'ka']

# Full selection list: '' and 'auto' first, then every concrete language code.
all_lang = ['', 'auto', *other_lang, *latin_lang, *arabic_lang, *cyrillic_lang, *devanagari_lang]
def to_pdf(file_path):
    """Ensure *file_path* points at a PDF document.

    A PDF is returned unchanged. Anything else PyMuPDF can open is
    converted; the converted bytes are written next to the original file
    under a fresh UUID-based name, and that new path is returned.
    """
    with pymupdf.open(file_path) as doc:
        if doc.is_pdf:
            return file_path
        pdf_bytes = doc.convert_to_pdf()

    # Persist the converted bytes as <uuid>.pdf beside the source file.
    tmp_file_path = os.path.join(os.path.dirname(file_path), f"{uuid.uuid4()}.pdf")
    with open(tmp_file_path, 'wb') as tmp_pdf_file:
        tmp_pdf_file.write(pdf_bytes)
    return tmp_file_path
# Bug fix: the function had no route decorator anywhere in the file, so the
# endpoint was never registered with FastAPI and could not be reached.
# NOTE(review): the original path was lost in the paste — confirm the
# intended route; "/process_document" is assumed from the function name.
@app.post("/process_document")
async def process_document(
    file: UploadFile = File(...),
    end_pages: int = 10,
    is_ocr: bool = False,
    layout_mode: str = "doclayout_yolo",
    formula_enable: bool = True,
    table_enable: bool = True,
    language: str = "auto"
):
    """Extract text from an uploaded document via PyMuPDF and magic-pdf.

    Returns a JSONResponse whose "text" field is a JSON-encoded structure
    combining both extraction results plus metadata; 422 when neither
    extractor produced any text, 500 on unexpected failure.
    """
    try:
        logger.info("\n=== НАЧАЛО ОБРАБОТКИ ДОКУМЕНТА ===")
        logger.info(f"Имя файла: {file.filename}")
        logger.info(f"Параметры: end_pages={end_pages}, is_ocr={is_ocr}, language={language}")

        # Persist the upload to a temp file both extractors can open.
        # NOTE(review): filename is client-controlled; consider sanitising
        # it before joining into a path.
        temp_path = f"/tmp/{file.filename}"
        try:
            with open(temp_path, "wb") as buffer:
                content = await file.read()
                buffer.write(content)
            logger.info(f"Файл сохранен: {temp_path}")
        except Exception as e:
            logger.error(f"Ошибка при сохранении файла: {str(e)}")
            raise

        def extract_text_pymupdf(pdf_path):
            """Plain-text extraction: sorted text blocks, page by page."""
            try:
                doc = fitz.open(pdf_path)
                logger.info(f"Открыт PDF, всего страниц: {doc.page_count}")
                text = ""
                for page_num in range(min(end_pages, doc.page_count)):
                    try:
                        page = doc[page_num]
                        blocks = page.get_text("blocks")
                        # Reading order: top-to-bottom, then left-to-right.
                        blocks.sort(key=lambda b: (b[1], b[0]))
                        for b in blocks:
                            text += b[4] + "\n"
                        logger.info(f"Обработана страница {page_num + 1}")
                    except Exception as page_error:
                        logger.error(f"Ошибка при обработке страницы {page_num + 1}: {str(page_error)}")
                doc.close()
                logger.info(f"Извлечено {len(text)} символов текста через PyMuPDF")
                return text
            except Exception as e:
                logger.error(f"Ошибка при извлечении текста через PyMuPDF: {str(e)}")
                return str(e)

        def extract_text_magicpdf(pdf_path):
            """Structured extraction via the magic-pdf markdown pipeline."""
            try:
                md_content, txt_content, archive_zip_path, new_pdf_path = to_markdown(
                    pdf_path,
                    end_pages=end_pages,
                    is_ocr=is_ocr,
                    layout_mode=layout_mode,
                    formula_enable=formula_enable,
                    table_enable=table_enable,
                    language=language
                )
                logger.info(f"Извлечено {len(txt_content)} символов текста через magic-pdf")
                return {
                    "text": txt_content,
                    "html": md_content
                }
            except Exception as e:
                logger.error(f"Ошибка при извлечении текста через magic-pdf: {str(e)}")
                return {"text": str(e), "html": ""}

        # Run both extractors over the saved upload.
        pymupdf_text = extract_text_pymupdf(temp_path) or ""
        magic_pdf_data = extract_text_magicpdf(temp_path)

        # 422 when neither extractor produced any text at all.
        if not pymupdf_text.strip() and not magic_pdf_data["text"].strip():
            error_msg = "Не удалось извлечь текст из документа ни одним из методов"
            logger.error(error_msg)
            return JSONResponse(
                status_code=422,
                content={
                    "error": error_msg,
                    "details": "Извлеченный текст пуст"
                }
            )

        # Bug fix: the page count used to come from a second fitz.open()
        # whose Document object was never closed — open, read and close it
        # explicitly via the context manager.
        with fitz.open(temp_path) as probe:
            page_count = min(end_pages, probe.page_count)

        combined_data = {
            "sources": {
                "pymupdf": {
                    "text": pymupdf_text
                },
                "magic_pdf": magic_pdf_data
            },
            "metadata": {
                "filename": file.filename,
                "page_count": page_count,
                "extraction_date": datetime.now().isoformat()
            }
        }

        # Best-effort cleanup of the temp upload (failure only warns).
        try:
            os.remove(temp_path)
            logger.info("Временный файл удален")
        except Exception as e:
            logger.warning(f"Не удалось удалить временный файл: {str(e)}")

        logger.info("\n=== ВОЗВРАЩАЕМЫЙ JSON ===")
        response_json = {"text": json.dumps(combined_data, ensure_ascii=False)}
        logger.info(json.dumps(response_json, indent=2, ensure_ascii=False)[:500] + "...")
        logger.info("\n=== УСПЕШНОЕ ЗАВЕРШЕНИЕ ОБРАБОТКИ ===")
        return JSONResponse(response_json)

    except Exception as e:
        error_msg = f"Критическая ошибка при обработке документа: {str(e)}\nTraceback: {traceback.format_exc()}"
        logger.error(error_msg)
        return JSONResponse(
            status_code=500,
            content={
                "error": error_msg,
                "details": {
                    "error_type": type(e).__name__,
                    "error_message": str(e),
                    "file_name": file.filename if file else None
                }
            }
        )
# Bug fix: init_model() was invoked a second time here, repeating the model
# initialisation (and its log line) that already runs right after the
# function's definition above. The duplicate call has been removed.
if __name__ == "__main__":
    # Serve on all interfaces, port 7860 (the Hugging Face Spaces default),
    # with info-level logging and access logs enabled.
    uvicorn.run(app, host="0.0.0.0", port=7860, log_level="info", access_log=True)