qgyd2021 committed
Commit d3a579f
Parent(s): b26afd1

[update]add code
.gitattributes CHANGED
@@ -53,3 +53,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.jpg filter=lfs diff=lfs merge=lfs -text
 *.jpeg filter=lfs diff=lfs merge=lfs -text
 *.webp filter=lfs diff=lfs merge=lfs -text
+*.jsonl filter=lfs diff=lfs merge=lfs -text
.gitignore CHANGED
@@ -2,6 +2,7 @@
 .git/
 .idea/
 
+data/
 hub_datasets/
 
 **/__pycache__/
cppe-5.py → cppe5.py RENAMED
@@ -1,26 +1,19 @@
-# coding=utf-8
-# Copyright 2021 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""CPPE-5 dataset."""
-
-
-import collections
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+from glob import glob
 import json
 import os
+from pathlib import Path
 
 import datasets
 
+# _URL = "https://drive.google.com/uc?id=1MGnaAfbckUmigGUvihz7uiHGC6rBIbvr"
+
+_HOMEPAGE = "https://sites.google.com/view/cppe5"
+
+_LICENSE = "Unknown"
+
+_CATEGORIES = ["Coverall", "Face_Shield", "Gloves", "Goggles", "Mask"]
 
 _CITATION = """\
 @misc{dagli2021cppe5,
@@ -39,15 +32,6 @@ to allow the study of subordinate categorization of medical personal protective
 which is not possible with other popular data sets that focus on broad level categories.
 """
 
-_HOMEPAGE = "https://sites.google.com/view/cppe5"
-
-_LICENSE = "Unknown"
-
-# _URL = "https://drive.google.com/uc?id=1MGnaAfbckUmigGUvihz7uiHGC6rBIbvr"
-_URL = "data/dataset.tar.gz"
-
-_CATEGORIES = ["Coverall", "Face_Shield", "Gloves", "Goggles", "Mask"]
-
 
 class CPPE5(datasets.GeneratorBasedBuilder):
     """CPPE - 5 dataset."""
@@ -80,56 +64,53 @@ class CPPE5(datasets.GeneratorBasedBuilder):
         )
 
     def _split_generators(self, dl_manager):
-        archive = dl_manager.download(_URL)
+        """Returns SplitGenerators."""
+        train_json = dl_manager.download("data/annotations/train.jsonl")
+        test_json = dl_manager.download("data/annotations/test.jsonl")
+
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
-                    "annotation_file_path": "annotations/train.json",
-                    "files": dl_manager.iter_archive(archive),
+                    "archive_path": train_json,
+                    "dl_manager": dl_manager,
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 gen_kwargs={
-                    "annotation_file_path": "annotations/test.json",
-                    "files": dl_manager.iter_archive(archive),
+                    "archive_path": test_json,
+                    "dl_manager": dl_manager,
                 },
             ),
         ]
 
-    def _generate_examples(self, annotation_file_path, files):
-        def process_annot(annot, category_id_to_category):
-            return {
-                "id": annot["id"],
-                "area": annot["area"],
-                "bbox": annot["bbox"],
-                "category": category_id_to_category[annot["category_id"]],
-            }
+    def _generate_examples(self, archive_path, dl_manager):
+        """Yields examples."""
+        archive_path = Path(archive_path)
 
-        image_id_to_image = {}
         idx = 0
-        # This loop relies on the ordering of the files in the archive:
-        # Annotation files come first, then the images.
-        for path, f in files:
-            file_name = os.path.basename(path)
-            if path == annotation_file_path:
-                annotations = json.load(f)
-                category_id_to_category = {category["id"]: category["name"] for category in annotations["categories"]}
-                image_id_to_annotations = collections.defaultdict(list)
-                for annot in annotations["annotations"]:
-                    image_id_to_annotations[annot["image_id"]].append(annot)
-                image_id_to_image = {annot["file_name"]: annot for annot in annotations["images"]}
-            elif file_name in image_id_to_image:
-                image = image_id_to_image[file_name]
-                objects = [
-                    process_annot(annot, category_id_to_category) for annot in image_id_to_annotations[image["id"]]
-                ]
+
+        with open(archive_path, "r", encoding="utf-8") as f:
+            for row in f:
+                sample = json.loads(row)
+
+                file_path = sample["image"]
+                file_path = os.path.join("data/images", file_path)
+                file_path = dl_manager.download(file_path)
+
+                with open(file_path, "rb") as image_f:
+                    image_bytes = image_f.read()
+
                 yield idx, {
-                    "image_id": image["id"],
-                    "image": {"path": path, "bytes": f.read()},
-                    "width": image["width"],
-                    "height": image["height"],
-                    "objects": objects,
+                    "image_id": sample["image_id"],
+                    "image": {"path": file_path, "bytes": image_bytes},
+                    "width": sample["width"],
+                    "height": sample["height"],
+                    "objects": sample["objects"],
                 }
                 idx += 1
+
+
+if __name__ == '__main__':
+    pass
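
For reference, the reworked loader expects one JSON object per line in data/annotations/{train,test}.jsonl, carrying the fields that examples/make_jsonl.py (added below) emits. A minimal sketch of one such row, shown as a Python dict; the values are made up for illustration, and bbox is copied verbatim from the COCO-style annotations (typically [x, y, width, height]):

# One line of data/annotations/train.jsonl (illustrative values, not real data):
sample = {
    "image_id": 0,
    "image": "image_0.jpg",  # file name; _generate_examples resolves it under data/images/
    "width": 943,
    "height": 1319,
    "objects": [
        {"id": 0, "area": 10000, "bbox": [100.0, 200.0, 100.0, 100.0], "category": "Mask"},
    ],
}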
data/dataset.tar.gz DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:1151086e59fcb87825ecf4d362847a3f023ba69e7ace0f513d5aadc0e3dd3094
-size 238482705
examples/make_jsonl.py ADDED
@@ -0,0 +1,59 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+from collections import defaultdict
+import json
+
+from project_settings import project_path
+
+
+def main():
+
+    for subset in ["train", "test"]:
+        filename = project_path / "data/annotations/{}.json".format(subset)
+        with open(filename.as_posix(), "r", encoding="utf-8") as f:
+            js = json.load(f)
+
+        images = js["images"]
+        type_ = js["type"]
+        annotations = js["annotations"]
+        categories = js["categories"]
+
+        index_to_label = dict()
+        for category in categories:
+            index = category["id"]
+            name = category["name"]
+            index_to_label[index] = name
+
+        # print(images)
+        image_id_to_annotations = defaultdict(list)
+        for annotation in annotations:
+            image_id = annotation["image_id"]
+            image_id_to_annotations[image_id].append(annotation)
+
+        with open("{}.jsonl".format(subset), "w", encoding="utf-8") as f:
+            for image in images:
+                image_id = image["id"]
+                annotations = image_id_to_annotations[image_id]
+
+                row = {
+                    "image_id": image["id"],
+                    "image": image["file_name"],
+                    "width": image["width"],
+                    "height": image["height"],
+                    "objects": [
+                        {
+                            "id": annotation["id"],
+                            "area": annotation["area"],
+                            "bbox": annotation["bbox"],
+                            "category": index_to_label[annotation["category_id"]],
+                        } for annotation in annotations
+                    ]
+                }
+                row = json.dumps(row, ensure_ascii=False)
+                f.write("{}\n".format(row))
+
+    return
+
+
+if __name__ == '__main__':
+    main()
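
Note that main() writes train.jsonl and test.jsonl into the current working directory, while cppe5.py downloads them from data/annotations/. A hedged sketch of the follow-up step (an assumption about the intended workflow, not part of this commit):

# Hypothetical follow-up: after running `python3 examples/make_jsonl.py` from the
# repository root, move the generated files to the path the loader expects.
import shutil

for subset in ["train", "test"]:
    shutil.move("{}.jsonl".format(subset), "data/annotations/{}.jsonl".format(subset))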
main.py ADDED
@@ -0,0 +1,16 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+from datasets import load_dataset
+
+dataset = load_dataset(
+    "cppe5.py",
+    name=None,
+    split="train",
+)
+
+for sample in dataset:
+    print(sample)
+
+
+if __name__ == '__main__':
+    pass
project_settings.py ADDED
@@ -0,0 +1,12 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+import os
+from pathlib import Path
+
+
+project_path = os.path.abspath(os.path.dirname(__file__))
+project_path = Path(project_path)
+
+
+if __name__ == '__main__':
+    pass
requirements.txt ADDED
@@ -0,0 +1,3 @@
+datasets==2.10.1
+tqdm==4.66.1
+Pillow==9.4.0