fixing script

- .DS_Store +0 -0
- .gitignore +2 -1
- data/images.tar.gz → images.tar.gz +0 -0
- image-text-demo.py → loading.py +12 -15
- scripts/create-jsonl.py +23 -0
- scripts/load.py +1 -2
- data/texts.tar.gz → texts.tar.gz +0 -0
.DS_Store
CHANGED
Binary files a/.DS_Store and b/.DS_Store differ
.gitignore
CHANGED
@@ -1 +1,2 @@
-__pycache__
+__pycache__
+other
data/images.tar.gz → images.tar.gz
RENAMED
File without changes
image-text-demo.py → loading.py
RENAMED
@@ -1,9 +1,15 @@
 import datasets
 
 _CITATION = """\
+@InProceedings{huggingface:dataset,
+title = {Small image-text set},
+author={James Briggs},
+year={2022}
+}
 """
 
 _DESCRIPTION = """\
+Demo dataset for testing or showing image-text capabilities.
 """
 _HOMEPAGE = "https://huggingface.co/datasets/danbrown/testman-dataset"
 
@@ -29,19 +35,13 @@ class ImageSet(datasets.GeneratorBasedBuilder):
         )
 
     def _split_generators(self, dl_manager):
-        images_archive = dl_manager.download(f"{_REPO}/resolve/main/data/images.tar.gz")
+        images_archive = dl_manager.download(f"{_REPO}/resolve/main/images.tar.gz")
         image_iters = dl_manager.iter_archive(images_archive)
-
-
-        # texts_archive = dl_manager.download(f"{_REPO}/resolve/main/data/texts.tar.gz")
-        # text_iters = dl_manager.iter_archive(texts_archive)
-
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
-                    "images": image_iters
-                    # "texts": text_iters,
+                    "images": image_iters
                 }
             ),
         ]
@@ -50,12 +50,9 @@ class ImageSet(datasets.GeneratorBasedBuilder):
         """ This function returns the examples in the raw (text) form."""
 
         for idx, (filepath, image) in enumerate(images):
-
-
-            # description = next(texts)[1].read().decode('utf-8')
-
+            description = filepath.split('/')[-1][:-4]
+            description = description.replace('_', ' ')
             yield idx, {
                 "image": {"path": filepath, "bytes": image.read()},
-                "text":
-
-            }
+                "text": description,
+            }
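Note on the loading change: the rewritten _generate_examples derives each caption from the image filename instead of a parallel texts archive. A minimal sketch of that transform in isolation, using a hypothetical filename:

    filepath = 'images/a_red_car.png'              # hypothetical archive member path
    description = filepath.split('/')[-1][:-4]     # keep the basename, drop the 4-char '.png' suffix
    description = description.replace('_', ' ')    # turn underscores back into spaces
    print(description)                             # prints: a red car

The [:-4] slice assumes a three-letter extension; os.path.splitext would also handle names like 1.jpeg correctly.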
scripts/create-jsonl.py
ADDED
@@ -0,0 +1,23 @@
+# from othersets import load_otherset
+import os
+
+
+# load all text files from ./other/texts, return a list of strings
+def load_texts():
+    texts = []
+    for filename in os.listdir('./other/texts'):
+        with open('./other/texts/' + filename, 'r') as f:
+            texts.append(f.read())
+    return texts
+
+# create a jsonl file with the text content in the 'target' column and the image number in 'image_relpath'
+def create_jsonl(texts):
+    with open('./other/texts.jsonl', 'w') as f:
+        for i, text in enumerate(texts):
+            f.write('{"image_relpath": "' + str(i+1) + '.png", "target": "' + text + '"}\n')
+
+
+# start the process
+if __name__ == '__main__':
+    texts = load_texts()
+    create_jsonl(texts)
scripts/load.py
CHANGED
@@ -1,5 +1,4 @@
 from datasets import load_dataset
 
 dataset = load_dataset('danbrown/testman-dataset', split='train')
-
-print(dataset)
+print(dataset[0])
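Assuming the builder declares an Image feature for the image column (the features block sits outside this diff), indexing the dataset decodes the stored bytes on access:

    from datasets import load_dataset

    dataset = load_dataset('danbrown/testman-dataset', split='train')
    example = dataset[0]
    print(example['text'])    # caption recovered from the image filename
    image = example['image']  # decoded PIL image if an Image feature is declared

Printing dataset[0] rather than dataset shows a concrete example instead of only the Dataset summary (features and row count).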
data/texts.tar.gz → texts.tar.gz
RENAMED
File without changes