bastienp committed
Commit 032c4ac · 1 Parent(s): c67f462

feat: update script dataset and include data from coco

Files changed (3)
  1. data/test.zip +3 -0
  2. data/train.zip +3 -0
  3. watermarkdataset.py +48 -6
data/test.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d4c3c88a478e8bc55fb239a623c8fea70e33e21384cec74b82e6fa0d62e9b945
+ size 464
data/train.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5a353d1d88a5379b2f73f5a87443161f0c15f6ed7ab8f6c82f2c16bf598d6365
+ size 810019672
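
Both archives are checked in through Git LFS, so the diff above shows pointer stubs (spec version, content hash, byte size: 464 bytes for test.zip, roughly 810 MB for train.zip) rather than the zip contents. Below is a minimal sketch, not part of the commit, for pulling the resolved archives with huggingface_hub; the repo id is taken from the URL commented out in watermarkdataset.py and is otherwise an assumption.

from huggingface_hub import hf_hub_download

# Sketch only: resolve the LFS pointer and download the real archive.
# repo_id is assumed from the commented-out URL removed later in this commit.
train_zip = hf_hub_download(
    repo_id="bastienp/visible-watermark-pita",
    repo_type="dataset",
    filename="data/train.zip",
)
print(train_zip)  # local cache path of the downloaded zip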
watermarkdataset.py CHANGED
@@ -1,8 +1,8 @@
  import os
- import torch
+ from glob import glob
+ from PIL import Image
 
  import datasets
- from pycocotools.coco import COCO
 
  _DESCRIPTION = """\
  Watermark Dataset
@@ -10,7 +10,7 @@ _DESCRIPTION = """\
 
  _VERSION = datasets.Version("1.0.0")
 
- _REPO = "data"# "https://huggingface.co/datasets/bastienp/visible-watermark-pita/raw/main/data"
+ _REPO = "data"
  _URLS = {"train": f"{_REPO}/train.zip", "valid": f"{_REPO}/valid.zip"}
 
  _CATEGORIES = ["watermark"]
@@ -31,11 +31,53 @@ class WatermarkPita(datasets.GeneratorBasedBuilder):
                      }),
                  }
              ),
-
+
              description=_DESCRIPTION,
          )
 
      def _split_generators(self, dl_manager):
-         return
+         data_dir = dl_manager.download_and_extract(_URLS)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"split": "train", "data_dir": data_dir["train"]},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={"split": "valid", "data_dir": data_dir["valid"]},
+             ),
+         ]
+
+
+     def _generate_examples(self, split, data_dir):
+         image_dir = os.path.join(data_dir, "images")
+         label_dir = os.path.join(data_dir, "labels")
+
+         image_paths = sorted(glob(image_dir + "/*/*.png"))
+         label_paths = sorted(glob(label_dir + "/*/*.txt"))
+
+         for idx, (image_path, label_path) in enumerate(zip(image_paths, label_paths)):
+             im = Image.open(image_path)
+             width, height = im.size
+
+             with open(label_path, "r") as f:
+                 lines = f.readlines()
+
+             objects = []
+             for line in lines:
+                 line = line.strip().split()
+
+                 bbox_class = int(line[0])
+                 bbox_top_left = int(float(line[1]) * width)
+                 bbox_top_right = int(float(line[2]) * height)
+                 bbox_bottom_left = int(float(line[3]) * width)
+                 bbox_bottom_right = int(float(line[4]) * height)
+
+                 objects.append({
+                     "label": bbox_class,
+                     "bbox": [bbox_top_left, bbox_top_right, bbox_bottom_left, bbox_bottom_right]
+                 })
 
-     def _generate_examples(self, images, metadata_path):
+             yield idx, {"image": image_path, "objects": objects}
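
For context, a minimal usage sketch of the updated loading script; this is not part of the commit. It assumes a local copy of watermarkdataset.py sitting next to the data/ archives and a `datasets` release that still executes dataset loading scripts. The exact layout of each example follows the features declaration, which the hunk above truncates.

from datasets import load_dataset

# Sketch only: load the builder from the local script and read one example.
ds = load_dataset("watermarkdataset.py", split="train")

example = ds[0]
# "image" and "objects" come from _generate_examples above; "objects" carries
# the per-box label ids and pixel-space coordinates parsed from the label files.
print(example["image"], example["objects"])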