arxyzan committed
Commit 06d88a6
1 parent: 19b5e2e

Delete flickr30k-fa.py

Files changed (1)
  1. flickr30k-fa.py +0 -79
flickr30k-fa.py DELETED
@@ -1,79 +0,0 @@
- import csv
- import os
-
- import datasets
-
- logger = datasets.logging.get_logger(__name__)
-
- _CITATION = """"""
-
- _DESCRIPTION = """Flickr30k filtered and translated to Persian made by Sajjad Ayoubi https://www.kaggle.com/datasets/sajjadayobi360/flickrfa"""
-
- _DOWNLOAD_URLS = {
-     "train": "https://huggingface.co/datasets/hezarai/flickr30k-fa/resolve/main/annotations_train.csv",
-     "test": "https://huggingface.co/datasets/hezarai/flickr30k-fa/resolve/main/annotations_test.csv",
-     "data": "https://huggingface.co/datasets/hezarai/flickr30k-fa/resolve/main/images.zip",
- }
-
- ZIP_IMAGES_DIR = "images"
-
-
- class Flickr30kFaConfig(datasets.BuilderConfig):
-     def __init__(self, **kwargs):
-         super(Flickr30kFaConfig, self).__init__(**kwargs)
-
-
- class Flickr30kFa(datasets.GeneratorBasedBuilder):
-     BUILDER_CONFIGS = [
-         Flickr30kFaConfig(
-             name="Persian flickr30k",
-             version=datasets.Version("1.0.0"),
-             description=_DESCRIPTION,
-         ),
-     ]
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     "image_path": datasets.Value("string"),
-                     "label": datasets.Value("string"),
-                 }
-             ),
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         """
-         Return SplitGenerators.
-         """
-
-         train_path = dl_manager.download_and_extract(_DOWNLOAD_URLS["train"])
-         test_path = dl_manager.download_and_extract(_DOWNLOAD_URLS["test"])
-         archive_path = dl_manager.download(_DOWNLOAD_URLS["data"])
-         images_dir = dl_manager.extract(archive_path) if not dl_manager.is_streaming else ""
-         images_dir = os.path.join(images_dir, ZIP_IMAGES_DIR)
-
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN, gen_kwargs={"annotations_file": train_path, "images_dir": images_dir}
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST, gen_kwargs={"annotations_file": test_path, "images_dir": images_dir}
-             ),
-         ]
-
-     def _generate_examples(self, annotations_file, images_dir):
-         logger.info("⏳ Generating examples from = %s", annotations_file)
-
-         with open(annotations_file, encoding="utf-8") as csv_file:
-             csv_reader = csv.reader(csv_file, quotechar='"', skipinitialspace=True)
-
-             # Skip header
-             next(csv_reader, None)
-
-             for id_, row in enumerate(csv_reader):
-                 filename, label = row
-                 image_path = os.path.join(images_dir, filename)
-                 yield id_, {"image_path": image_path, "label": label}
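
For reference, the deleted file is a standard datasets loading script: while it lived in the repo, calling load_dataset on "hezarai/flickr30k-fa" resolved to the Flickr30kFa builder above and produced records with image_path and label fields. The sketch below shows that usage under stated assumptions; trust_remote_code=True is an assumption about the installed datasets version (recent releases require it to execute Hub loading scripts, and the newest releases have dropped script-based loading altogether).

from datasets import load_dataset

# Sketch only: assumes a datasets release that still executes Hub loading scripts.
# On such versions, trust_remote_code=True is needed to run the Flickr30kFa builder
# defined in the (now deleted) flickr30k-fa.py.
ds = load_dataset("hezarai/flickr30k-fa", trust_remote_code=True)

# Each example follows the features declared in _info():
# {"image_path": "...", "label": "..."}
print(ds["train"][0])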