Support tables for markdown files in the general parser (#1278)
### What problem does this PR solve?
Support extracting tables from markdown files in the general parser
### Type of change
- [x] New Feature (non-breaking change which adds functionality)
- deepdoc/parser/__init__.py +2 -1
- deepdoc/parser/markdown_parser.py +44 -0
- rag/app/naive.py +34 -3
deepdoc/parser/__init__.py
CHANGED
@@ -16,4 +16,5 @@ from .docx_parser import RAGFlowDocxParser as DocxParser
|
|
16 |
from .excel_parser import RAGFlowExcelParser as ExcelParser
|
17 |
from .ppt_parser import RAGFlowPptParser as PptParser
|
18 |
from .html_parser import RAGFlowHtmlParser as HtmlParser
|
19 |
-
from .json_parser import RAGFlowJsonParser as JsonParser
|
|
|
|
16 |
from .excel_parser import RAGFlowExcelParser as ExcelParser
|
17 |
from .ppt_parser import RAGFlowPptParser as PptParser
|
18 |
from .html_parser import RAGFlowHtmlParser as HtmlParser
|
19 |
+
from .json_parser import RAGFlowJsonParser as JsonParser
|
20 |
+
from .markdown_parser import RAGFlowMarkdownParser as MarkdownParser
|
deepdoc/parser/markdown_parser.py
ADDED
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# -*- coding: utf-8 -*-
|
2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
3 |
+
# you may not use this file except in compliance with the License.
|
4 |
+
# You may obtain a copy of the License at
|
5 |
+
#
|
6 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
7 |
+
#
|
8 |
+
# Unless required by applicable law or agreed to in writing, software
|
9 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
10 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
11 |
+
# See the License for the specific language governing permissions and
|
12 |
+
# limitations under the License.
|
13 |
+
#
|
14 |
+
import re
|
15 |
+
|
16 |
+
class RAGFlowMarkdownParser:
    """Pull Markdown tables (bordered and borderless) out of a document.

    Tables are located with regular expressions; the text remaining after
    the tables are deleted is returned alongside the raw table snippets.
    """

    def __init__(self, chunk_token_num=128):
        # Target chunk size in tokens, kept for subclasses that chunk text.
        self.chunk_token_num = int(chunk_token_num)

    def extract_tables_and_remainder(self, markdown_text):
        """Split *markdown_text* into ``(remainder, tables)``.

        ``remainder`` is the input with every detected table removed;
        ``tables`` lists the raw table snippets in discovery order
        (pipe-bordered tables first, then borderless ones).
        """
        def pull_out(pattern, text):
            # Collect every match of *pattern* and strip it from *text*.
            return pattern.findall(text), pattern.sub('', text)

        # Standard Markdown table: header row, separator row, and one or
        # more data rows, each line wrapped in pipes.
        bordered = re.compile(
            r'''
            (?:\n|^)
            (?:\|.*?\|.*?\|.*?\n)
            (?:\|(?:\s*[:-]+[-| :]*\s*)\|.*?\n)
            (?:\|.*?\|.*?\|.*?\n)+
            ''', re.VERBOSE)
        tables, remainder = pull_out(bordered, markdown_text)

        # Borderless Markdown table: same shape but without the leading
        # and trailing pipes on each row.
        borderless = re.compile(
            r'''
            (?:\n|^)
            (?:\S.*?\|.*?\n)
            (?:(?:\s*[:-]+[-| :]*\s*).*?\n)
            (?:\S.*?\|.*?\n)+
            ''', re.VERBOSE)
        extra, remainder = pull_out(borderless, remainder)
        tables.extend(extra)

        return remainder, tables
|
rag/app/naive.py
CHANGED
@@ -17,12 +17,12 @@ from timeit import default_timer as timer
|
|
17 |
import re
|
18 |
from deepdoc.parser.pdf_parser import PlainParser
|
19 |
from rag.nlp import rag_tokenizer, naive_merge, tokenize_table, tokenize_chunks, find_codec, concat_img, naive_merge_docx, tokenize_chunks_docx
|
20 |
-
from deepdoc.parser import PdfParser, ExcelParser, DocxParser, HtmlParser, JsonParser
|
21 |
from rag.settings import cron_logger
|
22 |
from rag.utils import num_tokens_from_string
|
23 |
from PIL import Image
|
24 |
from functools import reduce
|
25 |
-
|
26 |
class Docx(DocxParser):
|
27 |
def __init__(self):
|
28 |
pass
|
@@ -135,6 +135,31 @@ class Pdf(PdfParser):
|
|
135 |
for b in self.boxes], tbls
|
136 |
|
137 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
138 |
def chunk(filename, binary=None, from_page=0, to_page=100000,
|
139 |
lang="Chinese", callback=None, **kwargs):
|
140 |
"""
|
@@ -185,7 +210,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
|
|
185 |
excel_parser = ExcelParser()
|
186 |
sections = [(l, "") for l in excel_parser.html(binary) if l]
|
187 |
|
188 |
-
elif re.search(r"\.(txt|
|
189 |
callback(0.1, "Start to parse.")
|
190 |
txt = ""
|
191 |
if binary:
|
@@ -207,6 +232,12 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
|
|
207 |
sections.append((sec, ""))
|
208 |
|
209 |
callback(0.8, "Finish parsing.")
|
|
|
|
|
|
|
|
|
|
|
|
|
210 |
|
211 |
elif re.search(r"\.(htm|html)$", filename, re.IGNORECASE):
|
212 |
callback(0.1, "Start to parse.")
|
|
|
17 |
import re
|
18 |
from deepdoc.parser.pdf_parser import PlainParser
|
19 |
from rag.nlp import rag_tokenizer, naive_merge, tokenize_table, tokenize_chunks, find_codec, concat_img, naive_merge_docx, tokenize_chunks_docx
|
20 |
+
from deepdoc.parser import PdfParser, ExcelParser, DocxParser, HtmlParser, JsonParser, MarkdownParser
|
21 |
from rag.settings import cron_logger
|
22 |
from rag.utils import num_tokens_from_string
|
23 |
from PIL import Image
|
24 |
from functools import reduce
|
25 |
+
from markdown import markdown
|
26 |
class Docx(DocxParser):
|
27 |
def __init__(self):
|
28 |
pass
|
|
|
135 |
for b in self.boxes], tbls
|
136 |
|
137 |
|
138 |
+
class Markdown(MarkdownParser):
    """Markdown loader for the naive chunker.

    Splits a markdown document into plain-text sections and HTML tables so
    the tables can be tokenized separately from the running text.
    """

    def __call__(self, filename, binary=None):
        """Parse *filename* (or the raw *binary* content when given).

        Returns ``(sections, tbls)`` where ``sections`` is a list of
        ``(text, "")`` pairs and ``tbls`` is a list of
        ``((None, html_table), "")`` pairs consumable by tokenize_table.
        """
        if binary:
            encoding = find_codec(binary)
            txt = binary.decode(encoding, errors="ignore")
        else:
            with open(filename, "r") as f:
                txt = f.read()
        # Trailing newline guarantees the table regexes can anchor on the
        # final row of a table that ends the file.
        remainder, tables = self.extract_tables_and_remainder(f'{txt}\n')
        sections = []
        for sec in remainder.split("\n"):
            # Halve overly long lines so a single section does not blow
            # past the downstream chunking budget.
            if num_tokens_from_string(sec) > 10 * self.chunk_token_num:
                sections.append((sec[:int(len(sec)/2)], ""))
                sections.append((sec[int(len(sec)/2):], ""))
            else:
                sections.append((sec, ""))
        # Render each extracted markdown table to HTML; tokenize_table
        # expects ((position, html), "") tuples, position unknown here.
        tbls = [((None, markdown(table, extensions=['markdown.extensions.tables'])), "")
                for table in tables]
        return sections, tbls
|
161 |
+
|
162 |
+
|
163 |
def chunk(filename, binary=None, from_page=0, to_page=100000,
|
164 |
lang="Chinese", callback=None, **kwargs):
|
165 |
"""
|
|
|
210 |
excel_parser = ExcelParser()
|
211 |
sections = [(l, "") for l in excel_parser.html(binary) if l]
|
212 |
|
213 |
+
elif re.search(r"\.(txt|py|js|java|c|cpp|h|php|go|ts|sh|cs|kt)$", filename, re.IGNORECASE):
|
214 |
callback(0.1, "Start to parse.")
|
215 |
txt = ""
|
216 |
if binary:
|
|
|
232 |
sections.append((sec, ""))
|
233 |
|
234 |
callback(0.8, "Finish parsing.")
|
235 |
+
|
236 |
+
elif re.search(r"\.(md|markdown)$", filename, re.IGNORECASE):
|
237 |
+
callback(0.1, "Start to parse.")
|
238 |
+
sections, tbls = Markdown(int(parser_config.get("chunk_token_num", 128)))(filename, binary)
|
239 |
+
res = tokenize_table(tbls, doc, eng)
|
240 |
+
callback(0.8, "Finish parsing.")
|
241 |
|
242 |
elif re.search(r"\.(htm|html)$", filename, re.IGNORECASE):
|
243 |
callback(0.1, "Start to parse.")
|