Formats: parquet
Languages: English
Libraries: Datasets, pandas
cwchen-cm committed
Commit 00a76a8 (1 parent: dc72671)

Delete shopping-queries-image-dataset-remove.py

shopping-queries-image-dataset-remove.py DELETED
@@ -1,181 +0,0 @@
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """Shopping Queries Image Dataset (SQID): a dataset loading script."""
-
-
- import os
-
- import datasets
-
-
- # BibTeX citation for the SQID paper (SIGIR eCom 2024)
- _CITATION = """\
- @InProceedings{SIGIR-eCom-2024,
-     title = {Shopping Queries Image Dataset (SQID): An Image-Enriched ESCI Dataset for Exploring Multimodal Learning in Product Search},
-     author = {Marie Al Ghossein and Ching-Wei Chen and Jason Tang},
-     year = {2024}
- }
- """
-
- _DESCRIPTION = """\
- The Shopping Queries Image Dataset (SQID) is an extension of the Amazon Shopping Queries Dataset, enriched with image information associated with 190,000 products.
- """
-
- # TODO: Add a link to an official homepage for the dataset here
- _HOMEPAGE = ""
-
- _LICENSE = "MIT"
-
- # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
- _BASE_URL = "https://huggingface.co/datasets/crossingminds/shopping-queries-image-dataset/resolve/main/data/"
- _URLS = {
-     "product_image_urls": _BASE_URL + "product_image_urls.parquet",
-     "product_features": _BASE_URL + "product_features.parquet",
-     "query_features": _BASE_URL + "query_features.parquet",
-     "supp_product_image_urls": _BASE_URL + "supp_product_image_urls.parquet",
- }
-
-
- class ShoppingQueriesImageDataset(datasets.GeneratorBasedBuilder):
-     """Shopping Queries Image Dataset"""
-
-     VERSION = datasets.Version("1.0.0")
-
-     # One configuration per parquet file; select one with e.g.
-     # data = datasets.load_dataset('crossingminds/shopping-queries-image-dataset', 'product_features')
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(name="product_image_urls", version=VERSION, description="Image URLs for products"),
-         datasets.BuilderConfig(name="product_features", version=VERSION, description="CLIP embeddings for products"),
-         datasets.BuilderConfig(name="query_features", version=VERSION, description="CLIP embeddings for queries"),
-         datasets.BuilderConfig(name="supp_product_image_urls", version=VERSION, description="Image URLs for supplemental set of products"),
-     ]
-
-     DEFAULT_CONFIG_NAME = "product_image_urls"
-
-     def _info(self):
-         # The feature schema depends on the selected configuration
-         if self.config.name == "product_image_urls":
-             features = datasets.Features(
-                 {
-                     "product_id": datasets.Value("string"),
-                     "image_url": datasets.Value("string"),
-                 }
-             )
-         elif self.config.name == "product_features":
-             features = datasets.Features(
-                 {
-                     "product_id": datasets.Value("string"),
-                     "clip_text_features": datasets.Sequence(datasets.Value("float32")),
-                     "clip_image_features": datasets.Sequence(datasets.Value("float32")),
-                 }
-             )
-         elif self.config.name == "query_features":
-             features = datasets.Features(
-                 {
-                     "query_id": datasets.Value("string"),
-                     "clip_text_features": datasets.Sequence(datasets.Value("float32")),
-                 }
-             )
-         elif self.config.name == "supp_product_image_urls":
-             features = datasets.Features(
-                 {
-                     "product_id": datasets.Value("string"),
-                     "image_url": datasets.Value("string"),
-                 }
-             )
-         else:
-             raise ValueError(f"Invalid configuration name: {self.config.name}")
-
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             # Features are built above because they differ between configurations
-             features=features,
-             homepage=_HOMEPAGE,
-             license=_LICENSE,
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         # The configuration selected by the user is in self.config.name;
-         # dl_manager downloads the matching parquet file and returns a local cached path.
-         urls = _URLS[self.config.name]
-         data_path = dl_manager.download_and_extract(urls)
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={
-                     "filepath": data_path,
-                     "split": "train",
-                 },
-             ),
-             # datasets.SplitGenerator(
-             #     name=datasets.Split.VALIDATION,
-             #     gen_kwargs={
-             #         "filepath": os.path.join(data_dir, "dev.jsonl"),
-             #         "split": "dev",
-             #     },
-             # ),
-             # datasets.SplitGenerator(
-             #     name=datasets.Split.TEST,
-             #     gen_kwargs={
-             #         "filepath": os.path.join(data_dir, "test.jsonl"),
-             #         "split": "test",
-             #     },
-             # ),
-         ]
-
-     # Method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
-     def _generate_examples(self, filepath, split):
-         """Yields examples as (key, example) tuples; keys only need to be unique per example."""
-         # Load the downloaded parquet file
-         dataset = datasets.Dataset.from_parquet(filepath)
-
-         # Yield one (key, record) tuple per row
-         for key, record in enumerate(dataset):
-             yield key, record
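
With the loading script deleted, the dataset is served from its parquet files alone. A minimal loading sketch, assuming the four files remain at the data/ paths referenced in _URLS above; it uses only the generic parquet builder of the datasets library and pandas.read_parquet, not the deleted script:

    from datasets import load_dataset
    import pandas as pd

    BASE = "https://huggingface.co/datasets/crossingminds/shopping-queries-image-dataset/resolve/main/data/"

    # Load one of the parquet files with the generic "parquet" builder
    ds = load_dataset("parquet", data_files=BASE + "product_image_urls.parquet", split="train")
    print(ds[0])  # expected keys per _info above: 'product_id', 'image_url'

    # Or read the same file directly into a pandas DataFrame
    df = pd.read_parquet(BASE + "product_image_urls.parquet")
    print(df.head())

The same pattern applies to product_features, query_features, and supp_product_image_urls; the embedding files carry the clip_text_features and clip_image_features columns declared in _info.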