Modalities: Image, Text
Formats: parquet
Libraries: Datasets, Dask
Commit 65e3eb7 (verified) · Parent: d89e6cb
zuminghuang committed: Upload 6 files
convert_to_sharegpt.py ADDED
import os
import re
import sys
import json
import argparse
import random
from pathlib import Path
from multiprocessing import cpu_count

from datasets import load_dataset
from tqdm import tqdm


# Instruction pools: one prompt is sampled per example so the resulting
# ShareGPT conversations are not all phrased identically.
markdown_prompts = [
    "Please transform the document’s contents into Markdown format.",
    "Extract the core information of the document and present it in markdown form.",
    "Reconstruct the document in markdown format, paying attention to title hierarchy and list usage.",
    "Task: Parse the main body of the document and convert it to markdown. Requirements: Retain the original logical structure, use elements such as titles, lists, and quotes appropriately, and ensure that the output document is clear and easy to read.",
    "Reorganize the document using markdown syntax, ensuring clear structure and logical coherence.",
]


html_table_prompts = [
    "Please encode the table from the image into HTML format.",
    "Render the table in the image as HTML code, please.",
    "Please transform the table from the image into HTML format.",
    "Convert the image’s table data into the HTML structure.",
    "Transform the image’s table into the HTML format, please.",
    "Convert the table found in the image into HTML format.",
]


def get_random_prompt(task_type):
    # Pick a random instruction matching the sample's task type;
    # unknown task types fall back to an empty prompt.
    prompts = {
        "document_parsing": markdown_prompts,
        "table_parsing": html_table_prompts,
    }
    return random.choice(prompts.get(task_type, [""]))


def build_res_batch(item):
    # Convert one dataset row into a ShareGPT-style record. The image is
    # written to disk once and referenced by absolute path. Note: `args`
    # is a module-level global set under __main__, inherited by the
    # fork-based map() worker processes.
    idx, img, gt, attr = item["id"], item["image"], item["gt"], item["attributes"]

    info = json.loads(attr)
    task_type = info.get("task", "unknown")
    doc_type = info.get("document_type", "unknown")
    save_path = os.path.join(args.image_path, idx + ".png")
    if not os.path.exists(save_path):
        img.save(save_path, quality=100)

    results = {
        "images": [str(Path(save_path).resolve())],
        "conversations": [
            {"from": "human", "value": get_random_prompt(task_type)},
            {"from": "gpt", "value": gt},
        ],
        "attributes": {"document_type": doc_type, "task": task_type},
    }

    # Preserve the bounding-box annotation (re-serialized) when present.
    if "bbox" in item:
        bbox = item["bbox"]
        results["bbox"] = (
            json.dumps(json.loads(bbox), ensure_ascii=False) if bbox != "" else ""
        )

    return results


def main(args):
    file_dir = args.input
    # Load every train-*.parquet shard in the input directory as one split.
    dataset = load_dataset(
        "parquet",
        data_files=os.path.join(file_dir, "train-*.parquet"),
        split="train",
        cache_dir=file_dir,
    )
    print(dataset)
    os.makedirs(args.image_path, exist_ok=True)

    processed = dataset.map(
        build_res_batch,
        batched=False,
        num_proc=32,
        remove_columns=dataset.column_names,
        desc="Converting to sharegpt format",
    )

    # Dump the converted records as a single JSON array.
    df = processed.to_pandas()
    df.to_json(args.output, orient="records", force_ascii=False, indent=2)


if __name__ == "__main__":

    def parse_args():
        parser = argparse.ArgumentParser(
            description="Convert parquet format to sharegpt format"
        )
        parser.add_argument("--input", type=str, required=True, help="Input directory")
        parser.add_argument(
            "--output", type=str, required=True, help="Output JSON file"
        )
        parser.add_argument(
            "--image_path", required=True, help="Output image directory"
        )
        return parser.parse_args()

    args = parse_args()
    main(args)
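
A possible way to run the converter, assuming the five parquet shards have been downloaded into a local directory (all paths below are hypothetical):

python convert_to_sharegpt.py --input ./ocr_data --output ./sharegpt.json --image_path ./images

Each record written to the output file follows the ShareGPT layout assembled in build_res_batch, roughly:

{
  "images": ["/abs/path/to/images/<id>.png"],
  "conversations": [
    {"from": "human", "value": "<randomly sampled prompt>"},
    {"from": "gpt", "value": "<ground-truth markdown or HTML table>"}
  ],
  "attributes": {"document_type": "...", "task": "document_parsing"},
  "bbox": "<JSON string, present only when the source row has a bbox column>"
}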
train-00000-of-00005.parquet ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:9afc2343cc57b5f41753ba3865138973bdb579fadcd24c08ed90da621c68a0f7
size 5634346684
train-00001-of-00005.parquet ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:8b229f7a356bd5920f62a3e69d181dd27569a8a50264236d8cd15b667663d508
size 5791874958
train-00002-of-00005.parquet ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:2af63e50022e4c711f5fff7cb70c5d67ae638326c58cd18ccadad30fce786320
size 3808354705
train-00003-of-00005.parquet ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:e41dddcc9c8d9708860b6883ec4d470ebfa0f6e3bdb8e71b1950c9c9fe9c27b4
size 3040480908
train-00004-of-00005.parquet ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:c014418013964a70b25005a46d57a225304b55ed7766af512f495c58c84dae82
size 384367163
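
The five train-*.parquet shards above are stored as Git LFS pointers. A minimal sketch (not part of this commit) for fetching them locally before running convert_to_sharegpt.py; the repository id and local directory are placeholders:

from huggingface_hub import snapshot_download

# Download only the parquet data shards from the dataset repository.
local_dir = snapshot_download(
    repo_id="<user>/<dataset-name>",      # hypothetical repo id
    repo_type="dataset",
    allow_patterns=["train-*.parquet"],
    local_dir="./ocr_data",
)
print(local_dir)  # pass this directory to --input of the converter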