vision-paper committed on
Commit
ae4c20f
·
verified ·
1 Parent(s): f48f408

Upload 2 files

Browse files
Files changed (2) hide show
  1. cn_segment_dataset.py +103 -0
  2. train.jsonl +0 -0
cn_segment_dataset.py ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import pandas as pd
from huggingface_hub import hf_hub_url
import datasets
import os

# Builder version and placeholder dataset-card metadata.
_VERSION = datasets.Version("0.0.1")

_DESCRIPTION = "TODO"
_HOMEPAGE = "TODO"
_LICENSE = "TODO"
_CITATION = "TODO"

# Each example pairs a source image with its segmentation map and a text prompt.
_FEATURES = datasets.Features(
    {
        "image": datasets.Image(),
        "segment": datasets.Image(),
        "prompt": datasets.Value("string"),
    },
)

# Hub repository hosting the metadata file and the two image archives.
_REPO_ID = "vision-paper/cn_segment_dataset"

METADATA_URL = hf_hub_url(_REPO_ID, filename="train.jsonl", repo_type="dataset")
IMAGE_URL = hf_hub_url(_REPO_ID, filename="image.zip", repo_type="dataset")
SEGMENT_URL = hf_hub_url(_REPO_ID, filename="segment.zip", repo_type="dataset")

_DEFAULT_CONFIG = datasets.BuilderConfig(name="default", version=_VERSION)
42
class VTONHD_segmented_segment(datasets.GeneratorBasedBuilder):
    """Dataset builder pairing images with segmentation maps and text prompts.

    Downloads a JSON-lines metadata file plus two zip archives from the
    ``vision-paper/cn_segment_dataset`` Hub repo and yields examples with
    three features: ``image``, ``segment`` (both images) and ``prompt``
    (string).
    """

    BUILDER_CONFIGS = [_DEFAULT_CONFIG]
    DEFAULT_CONFIG_NAME = "default"

    def _info(self):
        """Return dataset metadata (features, description, license, citation)."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=_FEATURES,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the metadata file and extract both archives; single TRAIN split."""
        metadata_path = dl_manager.download(METADATA_URL)
        image_dir = dl_manager.download_and_extract(IMAGE_URL)
        segment_dir = dl_manager.download_and_extract(SEGMENT_URL)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs are forwarded verbatim to _generate_examples.
                gen_kwargs={
                    "metadata_path": metadata_path,
                    "image_dir": image_dir,
                    "segment_dir": segment_dir,
                },
            ),
        ]

    def _generate_examples(self, metadata_path, image_dir, segment_dir, reference_dir=None):
        """Yield ``(key, example)`` pairs from the JSON-lines metadata.

        Args:
            metadata_path: Path to a JSONL file with ``image``, ``segment``
                and ``prompt`` columns; ``image`` / ``segment`` are paths
                relative to their respective extracted archives.
            image_dir: Directory the image archive was extracted into.
            segment_dir: Directory the segment archive was extracted into.
            reference_dir: Unused. BUG FIX: the original declared this as a
                required parameter, but _split_generators never passes it in
                gen_kwargs, so generation raised TypeError; a default of
                None restores compatibility without changing callers.

        Yields:
            Tuples keyed by the relative image path, with the example dict
            matching ``_FEATURES`` (image/segment as path+bytes dicts).
        """
        metadata = pd.read_json(metadata_path, lines=True)

        for _, row in metadata.iterrows():
            image_path = os.path.join(image_dir, row["image"])
            segment_path = os.path.join(segment_dir, row["segment"])

            # Read bytes via context managers so file handles are closed
            # promptly (the original `open(...).read()` leaked handles).
            with open(image_path, "rb") as f:
                image_bytes = f.read()
            with open(segment_path, "rb") as f:
                segment_bytes = f.read()

            yield row["image"], {
                "prompt": row["prompt"],
                "image": {
                    "path": image_path,
                    "bytes": image_bytes,
                },
                "segment": {
                    "path": segment_path,
                    "bytes": segment_bytes,
                },
            }
train.jsonl ADDED
The diff for this file is too large to render. See raw diff