misikoff committed on
Commit 51f8985
1 Parent(s): 13130b6

feat: see what happens with an empty dataset loading script

Files changed (2)
  1. process_home_value_forecasts.ipynb +1 -1
  2. zillow.py +183 -0
process_home_value_forecasts.ipynb CHANGED
@@ -755,7 +755,7 @@
   "for index, row in final_df.iterrows():\n",
   "    if row['RegionType'] == 'msa':\n",
   "        regionName = row['RegionName']\n",
-  "        final_df.at[index, 'Metro'] = regionName\n",
+  "        # final_df.at[index, 'Metro'] = regionName\n",
   "        \n",
   "        city = regionName.split(', ')[0]\n",
   "        final_df.at[index, 'City'] = city\n",
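For orientation, the cell touched by this hunk walks final_df row by row and derives a City value for metro ("msa") regions; the only change in this commit is that the Metro assignment is commented out. A minimal, hypothetical sketch of that logic follows; the two-row frame is invented for illustration, while the real final_df is built from the Zillow forecast data earlier in the notebook.

import pandas as pd

# Invented miniature of final_df; the real frame comes from the
# Zillow home value forecast file processed earlier in the notebook.
final_df = pd.DataFrame({
    "RegionType": ["msa", "zip"],
    "RegionName": ["Austin, TX", "78701"],
    "City": [None, None],
})

for index, row in final_df.iterrows():
    if row["RegionType"] == "msa":
        regionName = row["RegionName"]
        # final_df.at[index, "Metro"] = regionName  # commented out by this commit

        # "Austin, TX" -> "Austin"
        city = regionName.split(", ")[0]
        final_df.at[index, "City"] = city

print(final_df)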
zillow.py ADDED
@@ -0,0 +1,183 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # TODO: Address all TODOs and remove all explanatory comments
+ """TODO: Add a description here."""
+
+
+ import csv
+ import json
+ import os
+
+ import datasets
+
+ # TODO: Add BibTeX citation
+ # Find for instance the citation on arxiv or on the dataset repo/website
+ _CITATION = """\
+ @InProceedings{huggingface:dataset,
+ title = {A great new dataset},
+ author={huggingface, Inc.
+ },
+ year={2020}
+ }
+ """
+
+ # TODO: Add description of the dataset here
+ # You can copy an official description
+ _DESCRIPTION = """\
+ This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
+ """
+
+ # TODO: Add a link to an official homepage for the dataset here
+ _HOMEPAGE = ""
+
+ # TODO: Add the licence for the dataset here if you can find it
+ _LICENSE = ""
+
+ # TODO: Add link to the official dataset URLs here
+ # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
+ # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
+ _URLS = {
+     "first_domain": "https://huggingface.co/great-new-dataset-first_domain.zip",
+     "second_domain": "https://huggingface.co/great-new-dataset-second_domain.zip",
+ }
+
+
+ # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
+ class NewDataset(datasets.GeneratorBasedBuilder):
+     """TODO: Short description of my dataset."""
+
+     VERSION = datasets.Version("1.1.0")
+
+     # This is an example of a dataset with multiple configurations.
+     # If you don't want/need to define several sub-sets in your dataset,
+     # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
+
+     # If you need to make complex sub-parts in the datasets with configurable options
+     # you can create your own builder configuration class to store attributes, inheriting from datasets.BuilderConfig
+     # BUILDER_CONFIG_CLASS = MyBuilderConfig
+
+     # You will be able to load one or the other configuration in the following list with
+     # data = datasets.load_dataset('my_dataset', 'first_domain')
+     # data = datasets.load_dataset('my_dataset', 'second_domain')
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(
+             name="first_domain",
+             version=VERSION,
+             description="This part of my dataset covers a first domain",
+         ),
+         datasets.BuilderConfig(
+             name="second_domain",
+             version=VERSION,
+             description="This part of my dataset covers a second domain",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = "first_domain"  # It's not mandatory to have a default configuration. Just use one if it makes sense.
+
+     def _info(self):
+         # TODO: This method specifies the datasets.DatasetInfo object which contains information and typings for the dataset
+         if (
+             self.config.name == "first_domain"
+         ):  # This is the name of the configuration selected in BUILDER_CONFIGS above
+             features = datasets.Features(
+                 {
+                     "sentence": datasets.Value("string"),
+                     "option1": datasets.Value("string"),
+                     "answer": datasets.Value("string"),
+                     # These are the features of your dataset like images, labels ...
+                 }
+             )
+         else:  # This is an example to show how to have different features for "first_domain" and "second_domain"
+             features = datasets.Features(
+                 {
+                     "sentence": datasets.Value("string"),
+                     "option2": datasets.Value("string"),
+                     "second_domain_answer": datasets.Value("string"),
+                     # These are the features of your dataset like images, labels ...
+                 }
+             )
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # This defines the different columns of the dataset and their types
+             features=features,  # Here we define them above because they are different between the two configurations
+             # If there's a common (input, target) tuple from the features, uncomment the supervised_keys line below and
+             # specify them. They'll be used if as_supervised=True in builder.as_dataset.
+             # supervised_keys=("sentence", "label"),
+             # Homepage of the dataset for documentation
+             homepage=_HOMEPAGE,
+             # License for the dataset if available
+             license=_LICENSE,
+             # Citation for the dataset
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
+         # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
+
+         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs
+         # It can accept any type or nested list/dict and will give back the same structure with the URLs replaced with paths to local files.
+         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
+         urls = _URLS[self.config.name]
+         data_dir = dl_manager.download_and_extract(urls)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": os.path.join(data_dir, "train.jsonl"),
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": os.path.join(data_dir, "dev.jsonl"),
+                     "split": "dev",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": os.path.join(data_dir, "test.jsonl"),
+                     "split": "test",
+                 },
+             ),
+         ]
+
+     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+     def _generate_examples(self, filepath, split):
+         # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
+         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
+         with open(filepath, encoding="utf-8") as f:
+             for key, row in enumerate(f):
+                 data = json.loads(row)
+                 if self.config.name == "first_domain":
+                     # Yields examples as (key, example) tuples
+                     yield key, {
+                         "sentence": data["sentence"],
+                         "option1": data["option1"],
+                         "answer": "" if split == "test" else data["answer"],
+                     }
+                 else:
+                     yield key, {
+                         "sentence": data["sentence"],
+                         "option2": data["option2"],
+                         "second_domain_answer": (
+                             "" if split == "test" else data["second_domain_answer"]
+                         ),
+                     }
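As committed, zillow.py is still the untouched Hugging Face dataset-script template: _URLS points at placeholder zip archives and the features are the template's dummy sentence/answer fields, which matches the commit message's intent of seeing how the Hub reacts to an effectively empty loading script. For reference, a minimal usage sketch of how such a script would be loaded once it is filled in; the repo id below is hypothetical:

from datasets import load_dataset

# Hypothetical Hub repo id; substitute the dataset's real path.
# The second argument selects one of the BUILDER_CONFIGS by name.
data = load_dataset("misikoff/zillow", "first_domain")

# Splits are keyed by the names returned from _split_generators.
print(data["train"][0])

In the script's current state, the load would fail when download_and_extract hits the placeholder URLs, so the template's TODOs have to be resolved before the dataset is usable.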