ydshieh committed on
Commit
6984f2b
1 Parent(s): 93213be
This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. .gitattributes +33 -0
  2. README.md +41 -0
  3. coco_dataset/coco_dataset.py +191 -0
  4. coco_dataset/dummy_data/annotations_trainval2017.zip +3 -0
  5. coco_dataset/dummy_data/captions_train2017.json +3 -0
  6. coco_dataset/dummy_data/captions_val2017.json +3 -0
  7. coco_dataset/dummy_data/image_info_test-dev2017.json +3 -0
  8. coco_dataset/dummy_data/image_info_test2017.json +3 -0
  9. coco_dataset/dummy_data/image_info_test2017.zip +3 -0
  10. coco_dataset/dummy_data/test2017.zip +3 -0
  11. coco_dataset/dummy_data/test2017/000000000001.jpg +3 -0
  12. coco_dataset/dummy_data/test2017/000000000016.jpg +3 -0
  13. coco_dataset/dummy_data/test2017/000000000019.jpg +3 -0
  14. coco_dataset/dummy_data/test2017/000000000057.jpg +3 -0
  15. coco_dataset/dummy_data/test2017/000000000063.jpg +3 -0
  16. coco_dataset/dummy_data/test2017/000000000069.jpg +3 -0
  17. coco_dataset/dummy_data/test2017/000000000080.jpg +3 -0
  18. coco_dataset/dummy_data/test2017/000000000090.jpg +3 -0
  19. coco_dataset/dummy_data/test2017/000000000106.jpg +3 -0
  20. coco_dataset/dummy_data/test2017/000000000108.jpg +3 -0
  21. coco_dataset/dummy_data/test2017/000000000128.jpg +3 -0
  22. coco_dataset/dummy_data/test2017/000000000155.jpg +3 -0
  23. coco_dataset/dummy_data/test2017/000000000161.jpg +3 -0
  24. coco_dataset/dummy_data/test2017/000000000171.jpg +3 -0
  25. coco_dataset/dummy_data/test2017/000000000178.jpg +3 -0
  26. coco_dataset/dummy_data/test2017/000000000180.jpg +3 -0
  27. coco_dataset/dummy_data/train2017.zip +3 -0
  28. coco_dataset/dummy_data/train2017/000000000009.jpg +3 -0
  29. coco_dataset/dummy_data/train2017/000000000025.jpg +3 -0
  30. coco_dataset/dummy_data/train2017/000000000030.jpg +3 -0
  31. coco_dataset/dummy_data/train2017/000000000034.jpg +3 -0
  32. coco_dataset/dummy_data/train2017/000000000036.jpg +3 -0
  33. coco_dataset/dummy_data/train2017/000000000042.jpg +3 -0
  34. coco_dataset/dummy_data/train2017/000000000049.jpg +3 -0
  35. coco_dataset/dummy_data/train2017/000000000061.jpg +3 -0
  36. coco_dataset/dummy_data/train2017/000000000064.jpg +3 -0
  37. coco_dataset/dummy_data/train2017/000000000071.jpg +3 -0
  38. coco_dataset/dummy_data/train2017/000000000072.jpg +3 -0
  39. coco_dataset/dummy_data/train2017/000000000073.jpg +3 -0
  40. coco_dataset/dummy_data/train2017/000000000074.jpg +3 -0
  41. coco_dataset/dummy_data/train2017/000000000077.jpg +3 -0
  42. coco_dataset/dummy_data/train2017/000000000078.jpg +3 -0
  43. coco_dataset/dummy_data/train2017/000000000081.jpg +3 -0
  44. coco_dataset/dummy_data/val2017.zip +3 -0
  45. coco_dataset/dummy_data/val2017/000000000139.jpg +3 -0
  46. coco_dataset/dummy_data/val2017/000000000285.jpg +3 -0
  47. coco_dataset/dummy_data/val2017/000000000632.jpg +3 -0
  48. coco_dataset/dummy_data/val2017/000000000724.jpg +3 -0
  49. coco_dataset/dummy_data/val2017/000000000776.jpg +3 -0
  50. coco_dataset/dummy_data/val2017/000000000785.jpg +3 -0
.gitattributes CHANGED
@@ -25,3 +25,36 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zstandard filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+coco_dataset/data/* filter=lfs diff=lfs merge=lfs -text
+coco_dataset/data/**/ filter=lfs diff=lfs merge=lfs -text
+coco_dataset/data/**/* filter=lfs diff=lfs merge=lfs -text
+image_caption_dataset/coco_dataset/* filter=lfs diff=lfs merge=lfs -text
+image_caption_dataset/coco_dataset/**/ filter=lfs diff=lfs merge=lfs -text
+image_caption_dataset/coco_dataset/**/*/ filter=lfs diff=lfs merge=lfs -text
+image_caption_dataset/coco_dataset/**" filter=lfs diff=lfs merge=lfs -text
+image_caption_dataset/coco_dataset/**/*" filter=lfs diff=lfs merge=lfs -text
+image_caption_dataset/coco_dataset/**/**" filter=lfs diff=lfs merge=lfs -text
+image_caption_dataset/coco_dataset/**/**/* filter=lfs diff=lfs merge=lfs -text
+image_caption_dataset/coco_dataset/**/**/**" filter=lfs diff=lfs merge=lfs -text
+image_caption_dataset/coco_dataset/**/**/**/* filter=lfs diff=lfs merge=lfs -text
+image_caption_dataset/coco_dataset/**/**/**/*" filter=lfs diff=lfs merge=lfs -text
+image_caption_dataset/coco_dataset/**/**/**/**" filter=lfs diff=lfs merge=lfs -text
+image_caption_dataset/coco_dataset/**/**/**/**/* filter=lfs diff=lfs merge=lfs -text
+image_caption_dataset/coco_dataset/**/**/**/**/*" filter=lfs diff=lfs merge=lfs -text
+image_caption_dataset/coco_dataset/**/**/**/**/**" filter=lfs diff=lfs merge=lfs -text
+image_caption_dataset/coco_dataset/**/**/**/**/**/* filter=lfs diff=lfs merge=lfs -text
+image_caption_dataset/coco_dataset/**/**/**/**/**/*" filter=lfs diff=lfs merge=lfs -text
+image_caption_dataset/coco_dataset/**/**/**/**/**/**" filter=lfs diff=lfs merge=lfs -text
+image_caption_dataset/coco_dataset/**/**/**/**/**/**/* filter=lfs diff=lfs merge=lfs -text
+image_caption_dataset/coco_dataset/**/**/**/**/**/**/*" filter=lfs diff=lfs merge=lfs -text
+image_caption_dataset/coco_dataset/**/**/**/**/**/**/**" filter=lfs diff=lfs merge=lfs -text
+coco_dataset/dummy_data/* filter=lfs diff=lfs merge=lfs -text
+coco_dataset/dummy_data/*" filter=lfs diff=lfs merge=lfs -text
+coco_dataset/dummy_data/**" filter=lfs diff=lfs merge=lfs -text
+coco_dataset/dummy_data/**/* filter=lfs diff=lfs merge=lfs -text
+coco_dataset/dummy_data/**/*" filter=lfs diff=lfs merge=lfs -text
+coco_dataset/dummy_data/**/**" filter=lfs diff=lfs merge=lfs -text
+coco_dataset/dummy_data/**/**/* filter=lfs diff=lfs merge=lfs -text
+coco_dataset/dummy_data/**/**/*" filter=lfs diff=lfs merge=lfs -text
+coco_dataset/dummy_data/**/**/**" filter=lfs diff=lfs merge=lfs -text
+
README.md ADDED
@@ -0,0 +1,41 @@
+## Example
+
+The model is by no means a state-of-the-art model, but nevertheless
+produces reasonable image captioning results. It was mainly fine-tuned
+as a proof-of-concept for the 🤗 FlaxVisionEncoderDecoder framework.
+
+The model can be used as follows:
+
+```python
+
+import requests
+from PIL import Image
+
+from transformers import ViTFeatureExtractor, AutoTokenizer, FlaxVisionEncoderDecoderModel
+
+loc = "ydshieh/flax-vit-gpt2-coco-en"
+
+feature_extractor = ViTFeatureExtractor.from_pretrained(loc)
+tokenizer = AutoTokenizer.from_pretrained(loc)
+model = FlaxVisionEncoderDecoderModel.from_pretrained(loc)
+
+# We will verify our results on an image of cute cats
+url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+with Image.open(requests.get(url, stream=True).raw) as img:
+    pixel_values = feature_extractor(images=img, return_tensors="np").pixel_values
+
+def generate_step(pixel_values):
+
+    output_ids = model.generate(pixel_values, max_length=16, num_beams=4).sequences
+    preds = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
+    preds = [pred.strip() for pred in preds]
+
+    return preds
+
+preds = generate_step(pixel_values)
+print(preds)
+
+# should produce
+# ['a cat laying on top of a couch next to another cat']
+
+```
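
As a side note on the README example above (not part of this commit): when captioning is run repeatedly, the generation step can be compiled once with `jax.jit`, keeping tokenizer decoding outside the compiled function since tokenizers are not JAX-traceable. A minimal sketch, assuming `jax` is available and that `model`, `tokenizer`, and `pixel_values` come from the snippet above:

```python
import jax
import numpy as np

# Hypothetical extension of the README snippet: compile generation once so that
# repeated calls reuse the traced computation. max_length / num_beams are plain
# Python constants, so they are baked into the compiled function.
def generate_ids(pixel_values):
    return model.generate(pixel_values, max_length=16, num_beams=4).sequences

jit_generate_ids = jax.jit(generate_ids)

# Decoding stays outside the jitted call.
output_ids = jit_generate_ids(pixel_values)
preds = [pred.strip() for pred in
         tokenizer.batch_decode(np.asarray(output_ids), skip_special_tokens=True)]
print(preds)
```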
coco_dataset/coco_dataset.py ADDED
@@ -0,0 +1,191 @@
+import csv
+import json
+import os
+
+import datasets
+import pandas as pd
+import numpy as np
+
+
+class ImageCaptionBuilderConfig(datasets.BuilderConfig):
+
+    def __init__(self, name, splits, **kwargs):
+
+        super().__init__(name, **kwargs)
+
+        self.splits = splits
+
+
+# TODO: Add BibTeX citation
+# Find for instance the citation on arxiv or on the dataset repo/website
+_CITATION = """\
+@InProceedings{None,
+title = {COCO dataset},
+author={...},
+year={...}
+}
+"""
+
+# TODO: Add description of the dataset here
+# You can copy an official description
+_DESCRIPTION = """\
+
+"""
+
+# TODO: Add a link to an official homepage for the dataset here
+_HOMEPAGE = ""
+
+# TODO: Add the licence for the dataset here if you can find it
+_LICENSE = ""
+
+# TODO: Add link to the official dataset URLs here
+# The HuggingFace datasets library doesn't host the datasets but only points to the original files
+# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
+_URLs = {}
+
+
+# TODO: The name of the dataset usually matches the script name with CamelCase instead of snake_case
+class ImageCaptionDataset(datasets.GeneratorBasedBuilder):
+    """TODO: Short description of my dataset."""
+
+    VERSION = datasets.Version("0.0.0")
+
+    BUILDER_CONFIG_CLASS = ImageCaptionBuilderConfig
+    BUILDER_CONFIGS = [
+        ImageCaptionBuilderConfig(name='2017', splits=['train', 'valid', 'test']),
+    ]
+    DEFAULT_CONFIG_NAME = "2017"
+
+    def _info(self):
+        # TODO: This method specifies the datasets.DatasetInfo object which contains information and typings for the dataset
+
+        feature_dict = {
+            "image_id": datasets.Value("int64"),
+            "caption_id": datasets.Value("int64"),
+            "caption": datasets.Value("string"),
+            "height": datasets.Value("int64"),
+            "width": datasets.Value("int64"),
+            "file_name": datasets.Value("string"),
+            "coco_url": datasets.Value("string"),
+            "image_path": datasets.Value("string"),
+        }
+
+        features = datasets.Features(feature_dict)
+
+        return datasets.DatasetInfo(
+            # This is the description that will appear on the datasets page.
+            description=_DESCRIPTION,
+            # This defines the different columns of the dataset and their types
+            features=features,  # Here we define them above because they are different between the two configurations
+            # If there's a common (input, target) tuple from the features,
+            # specify them here. They'll be used if as_supervised=True in
+            # builder.as_dataset.
+            supervised_keys=None,
+            # Homepage of the dataset for documentation
+            homepage=_HOMEPAGE,
+            # License for the dataset if available
+            license=_LICENSE,
+            # Citation for the dataset
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+        """Returns SplitGenerators."""
+        # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
+        # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
+
+        data_dir = self.config.data_dir
+
+        splits = []
+        for split in self.config.splits:
+            if split == 'train':
+                dataset = datasets.SplitGenerator(
+                    name=datasets.Split.TRAIN,
+                    # These kwargs will be passed to _generate_examples
+                    gen_kwargs={
+                        "json_path": os.path.join(data_dir, f"captions_train{self.config.name}.json"),
+                        "image_dir": os.path.join(data_dir, f'train{self.config.name}'),
+                        "split": "train",
+                    }
+                )
+            elif split in ['val', 'valid', 'validation', 'dev']:
+                dataset = datasets.SplitGenerator(
+                    name=datasets.Split.VALIDATION,
+                    # These kwargs will be passed to _generate_examples
+                    gen_kwargs={
+                        "json_path": os.path.join(data_dir, f"captions_val{self.config.name}.json"),
+                        "image_dir": os.path.join(data_dir, f'val{self.config.name}'),
+                        "split": "valid",
+                    },
+                )
+            elif split == 'test':
+                dataset = datasets.SplitGenerator(
+                    name=datasets.Split.TEST,
+                    # These kwargs will be passed to _generate_examples
+                    gen_kwargs={
+                        "json_path": os.path.join(data_dir, f'image_info_test{self.config.name}.json'),
+                        "image_dir": os.path.join(data_dir, f'test{self.config.name}'),
+                        "split": "test",
+                    },
+                )
+            else:
+                continue
+
+            splits.append(dataset)
+
+        return splits
+
+    def _generate_examples(
+        # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+        self, json_path, image_dir, split
+    ):
+        """Yields examples as (key, example) tuples."""
+        # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
+        # The `key` is here for legacy reasons (tfds) and is not important in itself.
+
+        _features = ["image_id", "caption_id", "caption", "height", "width", "file_name", "coco_url", "image_path", "id"]
+        features = list(_features)
+
+        if split == "valid":
+            split = "val"
+
+        with open(json_path, 'r', encoding='UTF-8') as fp:
+            data = json.load(fp)
+
+        # list of dict
+        images = data["images"]
+        entries = images
+
+        # build a dict of image_id -> image info dict
+        d = {image["id"]: image for image in images}
+
+        # list of dict
+        if split in ["train", "val"]:
+            annotations = data["annotations"]
+
+            # merge the image info into each caption annotation, keeping the annotation id
+            for annotation in annotations:
+                _id = annotation["id"]
+                image_info = d[annotation["image_id"]]
+                annotation.update(image_info)
+                annotation["id"] = _id
+
+            entries = annotations
+
+        for id_, entry in enumerate(entries):
+
+            entry = {k: v for k, v in entry.items() if k in features}
+
+            if split == "test":
+                entry["image_id"] = entry["id"]
+                entry["id"] = -1
+                entry["caption"] = -1
+
+            entry["caption_id"] = entry.pop("id")
+            entry["image_path"] = os.path.join(image_dir, entry["file_name"])
+
+            entry = {k: entry[k] for k in _features if k in entry}
+
+            print(entry)
+
+            yield str((entry["image_id"], entry["caption_id"])), entry
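
For context on how the loading script above is meant to be consumed (not part of this commit): it reads files straight from `self.config.data_dir`, so the caption JSON files and the image folders have to sit directly under that directory. A minimal usage sketch, where `./coco` is a hypothetical local path laid out that way:

```python
# Hypothetical usage of the loading script added in this commit.
# Assumed layout under ./coco (the script does not download anything itself):
#   ./coco/captions_train2017.json    ./coco/train2017/
#   ./coco/captions_val2017.json      ./coco/val2017/
#   ./coco/image_info_test2017.json   ./coco/test2017/
from datasets import load_dataset

ds = load_dataset(
    "coco_dataset/coco_dataset.py",  # path to the script in this repo
    "2017",                          # config name defined in BUILDER_CONFIGS
    data_dir="./coco",
)

print(ds)  # train / validation / test splits
example = ds["train"][0]
print(example["caption"], example["image_path"])
```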
coco_dataset/dummy_data/annotations_trainval2017.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:803f8cc8c2bd46633be6866aa0230d25916e0c2de6cad85bf11573fbe352efc6
+size 7458
coco_dataset/dummy_data/captions_train2017.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a3c77d344bd683c0e9b235892a2bc6db096bcd470feb80a5928b98696739a34d
+size 20225
coco_dataset/dummy_data/captions_val2017.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d081d092d0c7f7d052ce5759f366c0ddfe743418ea4d26ccad6a9d91ae0e4f51
+size 20148
coco_dataset/dummy_data/image_info_test-dev2017.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:47b81764a5e1b843363a4ee847c12caa19f2f80fef0c43fb512110c575f27b61
+size 14535
coco_dataset/dummy_data/image_info_test2017.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eba76001d86881501288d8600812a43dcda2c3dd2f4bc46b84966ef45412ee80
+size 15439
coco_dataset/dummy_data/image_info_test2017.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e30ca36eba1cb0a631bf78201c92f508be9e105777d0c8b763edbc8b8517d5a2
+size 3498
coco_dataset/dummy_data/test2017.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:abdc12551acb154ed65c38f493ce930ad22fffd74c1b1e1f811a46212eb28e91
+size 3023535
coco_dataset/dummy_data/test2017/000000000001.jpg ADDED

Git LFS Details

  • SHA256: 91d74bd2a576101fdc78a448f8f8c52816dda6ec2e37e0bffcb9e30e950647ae
  • Pointer size: 131 Bytes
  • Size of remote file: 159 kB
coco_dataset/dummy_data/test2017/000000000016.jpg ADDED

Git LFS Details

  • SHA256: e77b3610a4d60decaf6399090e772f45f6bb69d13902e46f1aa67e6ffa44c05d
  • Pointer size: 131 Bytes
  • Size of remote file: 231 kB
coco_dataset/dummy_data/test2017/000000000019.jpg ADDED

Git LFS Details

  • SHA256: bd645634cc290ad2cba0d0544aea02347edb8510dbc69586d367afc68bafd5ed
  • Pointer size: 131 Bytes
  • Size of remote file: 285 kB
coco_dataset/dummy_data/test2017/000000000057.jpg ADDED

Git LFS Details

  • SHA256: 2c00c990da26ba8b195d8e1ffaa1085cad53be95ec8de83c5d4941c1dd0912f2
  • Pointer size: 131 Bytes
  • Size of remote file: 190 kB
coco_dataset/dummy_data/test2017/000000000063.jpg ADDED

Git LFS Details

  • SHA256: 8e6eb85f9bce5a7622123bec2c2b62d182b35f162b1d04f7dc4597ab055ac6db
  • Pointer size: 131 Bytes
  • Size of remote file: 216 kB
coco_dataset/dummy_data/test2017/000000000069.jpg ADDED

Git LFS Details

  • SHA256: 47193896385f8d284f3e20dd457253d2e51d4bd77e7e3531e4731a5f131a4493
  • Pointer size: 131 Bytes
  • Size of remote file: 113 kB
coco_dataset/dummy_data/test2017/000000000080.jpg ADDED

Git LFS Details

  • SHA256: 77393198302233c5df1f3b5bfe9e7fcd20af37402db4e29692d608aa0090b678
  • Pointer size: 131 Bytes
  • Size of remote file: 113 kB
coco_dataset/dummy_data/test2017/000000000090.jpg ADDED

Git LFS Details

  • SHA256: 9ff72dc5f69998b7777802f2d6c6dc261c67cf9f5ade8e0f94e030edd422467b
  • Pointer size: 131 Bytes
  • Size of remote file: 255 kB
coco_dataset/dummy_data/test2017/000000000106.jpg ADDED

Git LFS Details

  • SHA256: 4797648ce66c98cd4d12945b314120aa85b73f32db6a33f5bd07d7a290e872bb
  • Pointer size: 131 Bytes
  • Size of remote file: 248 kB
coco_dataset/dummy_data/test2017/000000000108.jpg ADDED

Git LFS Details

  • SHA256: 9dac65dacd78f1dbd389025524ca74055f3441419ec694e54633f699ef4ae1ab
  • Pointer size: 131 Bytes
  • Size of remote file: 273 kB
coco_dataset/dummy_data/test2017/000000000128.jpg ADDED

Git LFS Details

  • SHA256: 4741971ce3b5f5d3cf46b91ecce5cde63bcad143045c9739ecd39fee8bcd1832
  • Pointer size: 131 Bytes
  • Size of remote file: 212 kB
coco_dataset/dummy_data/test2017/000000000155.jpg ADDED

Git LFS Details

  • SHA256: 413ead1673ab92a827f3225de59b3100f591bb64f7886b31b692a712592ebe49
  • Pointer size: 130 Bytes
  • Size of remote file: 79.7 kB
coco_dataset/dummy_data/test2017/000000000161.jpg ADDED

Git LFS Details

  • SHA256: 22eaba0e35f9e452c8e208aabb570dcb5f92fe16d0d30cc28e44a22d613370d6
  • Pointer size: 131 Bytes
  • Size of remote file: 147 kB
coco_dataset/dummy_data/test2017/000000000171.jpg ADDED

Git LFS Details

  • SHA256: 51f983839ec43b66572f88b187c8da76902e2da62967cf1b7fc8f8444bf544f6
  • Pointer size: 130 Bytes
  • Size of remote file: 61.3 kB
coco_dataset/dummy_data/test2017/000000000178.jpg ADDED

Git LFS Details

  • SHA256: 47a086149db68c389925d97da1c56ca05e5375a3c29f418a61c5a155a5e6d9a2
  • Pointer size: 131 Bytes
  • Size of remote file: 173 kB
coco_dataset/dummy_data/test2017/000000000180.jpg ADDED

Git LFS Details

  • SHA256: 01a19909f472d5fd46e3604c164166b422e51995f06496b790a2e80576c90262
  • Pointer size: 131 Bytes
  • Size of remote file: 286 kB
coco_dataset/dummy_data/train2017.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:370a953a7907709727f53c989d8282a1c0e47fb6fc0900cebf1c67ba362c6e73
+size 3590323
coco_dataset/dummy_data/train2017/000000000009.jpg ADDED

Git LFS Details

  • SHA256: 35cdfe8259aca40d564baf33ee749d82ce852446bd9574f0c47551d8bfffda99
  • Pointer size: 131 Bytes
  • Size of remote file: 224 kB
coco_dataset/dummy_data/train2017/000000000025.jpg ADDED

Git LFS Details

  • SHA256: d8f12a26d8803701cabac80494b080f998e5ed9bafaf61a2825ce6212c85487a
  • Pointer size: 131 Bytes
  • Size of remote file: 196 kB
coco_dataset/dummy_data/train2017/000000000030.jpg ADDED

Git LFS Details

  • SHA256: 0444b10826d376ad9075805061405f6071a62b80eda29c5f284ed77b093d5b1d
  • Pointer size: 130 Bytes
  • Size of remote file: 71.5 kB
coco_dataset/dummy_data/train2017/000000000034.jpg ADDED

Git LFS Details

  • SHA256: 2c46871034fa901ae795a8bb916ba7f2f728507cab9e511cced0986bd083d193
  • Pointer size: 131 Bytes
  • Size of remote file: 406 kB
coco_dataset/dummy_data/train2017/000000000036.jpg ADDED

Git LFS Details

  • SHA256: 7b04d9d0a1ea8b930e11f293a832bdfe8d43892fdb96f1038219196d41a86b95
  • Pointer size: 131 Bytes
  • Size of remote file: 260 kB
coco_dataset/dummy_data/train2017/000000000042.jpg ADDED

Git LFS Details

  • SHA256: 9da8dbb3f415b0549a2c7ef8930e245af324191cc4c4465a7d75f05d96b0781d
  • Pointer size: 131 Bytes
  • Size of remote file: 213 kB
coco_dataset/dummy_data/train2017/000000000049.jpg ADDED

Git LFS Details

  • SHA256: 0a29047d4bd83d26ae5370c03447bfa1ccd7005362ee180510e47af61746273a
  • Pointer size: 131 Bytes
  • Size of remote file: 125 kB
coco_dataset/dummy_data/train2017/000000000061.jpg ADDED

Git LFS Details

  • SHA256: 03919a304272b466ac9415ecc8eebf7e6ec6d5a370e027ea8ed8c348e73d4024
  • Pointer size: 131 Bytes
  • Size of remote file: 400 kB
coco_dataset/dummy_data/train2017/000000000064.jpg ADDED

Git LFS Details

  • SHA256: efb62bc395400df8dfe6ab5ea3577913ca029753628f37af4bcca8144fd67af5
  • Pointer size: 131 Bytes
  • Size of remote file: 221 kB
coco_dataset/dummy_data/train2017/000000000071.jpg ADDED

Git LFS Details

  • SHA256: c9e8680a3bdd1021d8cce453d91255df472e9c7cf79bf6312b80b5df8d7b665c
  • Pointer size: 131 Bytes
  • Size of remote file: 214 kB
coco_dataset/dummy_data/train2017/000000000072.jpg ADDED

Git LFS Details

  • SHA256: 34985f0e3d9e71652c47bce8813b6a1925f431c0925a094b37208a97df670a7b
  • Pointer size: 131 Bytes
  • Size of remote file: 239 kB
coco_dataset/dummy_data/train2017/000000000073.jpg ADDED

Git LFS Details

  • SHA256: a202bf8a3327508e135aa7a2ba01014f67cd68253d2eed33dd7a5e1e24523aef
  • Pointer size: 131 Bytes
  • Size of remote file: 384 kB
coco_dataset/dummy_data/train2017/000000000074.jpg ADDED

Git LFS Details

  • SHA256: de6e1fbb6b8569bb85d2e82c9aa6cc528ac87fba11135c1f3691789a27fc13a5
  • Pointer size: 131 Bytes
  • Size of remote file: 176 kB
coco_dataset/dummy_data/train2017/000000000077.jpg ADDED

Git LFS Details

  • SHA256: c45ca68cf1c5af1efb6dc04df82e8821580e97862761914e665c7b965850b14d
  • Pointer size: 131 Bytes
  • Size of remote file: 159 kB
coco_dataset/dummy_data/train2017/000000000078.jpg ADDED

Git LFS Details

  • SHA256: fbf237b294405624b5d662fe7f45a4c1b77bc275ade45f0737f5a6f6b1b6bb19
  • Pointer size: 131 Bytes
  • Size of remote file: 210 kB
coco_dataset/dummy_data/train2017/000000000081.jpg ADDED

Git LFS Details

  • SHA256: 1a57ac5980eff9d319871e17b5fe17363b5788861957d665cfe9e9c43fc9c27a
  • Pointer size: 131 Bytes
  • Size of remote file: 113 kB
coco_dataset/dummy_data/val2017.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c3625319b0adc788f918dea4a32a3c551323fd883888e8a03668cb47dd823b94
+size 2556555
coco_dataset/dummy_data/val2017/000000000139.jpg ADDED

Git LFS Details

  • SHA256: ffe0f0cec3b2e27aab1967229cdf0a0d7751dcdd5800322f0b8ac0dffb3b8a8d
  • Pointer size: 131 Bytes
  • Size of remote file: 162 kB
coco_dataset/dummy_data/val2017/000000000285.jpg ADDED

Git LFS Details

  • SHA256: f3a2974ce3686332609124c70e3e6a2e3aca43fccf1cd1bd7c5c03820977f57d
  • Pointer size: 131 Bytes
  • Size of remote file: 336 kB
coco_dataset/dummy_data/val2017/000000000632.jpg ADDED

Git LFS Details

  • SHA256: a4cd7f45ac1ce27eaafb254b23af7c0b18a064be08870ceaaf03b2147f2ce550
  • Pointer size: 131 Bytes
  • Size of remote file: 156 kB
coco_dataset/dummy_data/val2017/000000000724.jpg ADDED

Git LFS Details

  • SHA256: 5c0e559c75d3969c8e3e297b61f61063f78045c9d4802b526ba616361f3823fd
  • Pointer size: 131 Bytes
  • Size of remote file: 130 kB
coco_dataset/dummy_data/val2017/000000000776.jpg ADDED

Git LFS Details

  • SHA256: 1dd31e9059c491992be2f562624eb4093e17aee08b4f7baf5ff9ea24543b0a33
  • Pointer size: 131 Bytes
  • Size of remote file: 176 kB
coco_dataset/dummy_data/val2017/000000000785.jpg ADDED

Git LFS Details

  • SHA256: 83981537a7baeafbeb9c8cb67b3484dc26433f574b3685d021fa537e277e4726
  • Pointer size: 131 Bytes
  • Size of remote file: 134 kB