misikoff committed
Commit 500b356
Parent: ec8644a

fix: simplify processors
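
coalesce_columns is now applied inside get_combined_df, so the processor notebooks no longer import or call it directly; the unused new_dataset_script.py template is also removed.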

new_dataset_script.py DELETED
@@ -1,183 +0,0 @@
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- # TODO: Address all TODOs and remove all explanatory comments
- """TODO: Add a description here."""
-
-
- import csv
- import json
- import os
-
- import datasets
-
- # TODO: Add BibTeX citation
- # Find for instance the citation on arxiv or on the dataset repo/website
- _CITATION = """\
- @InProceedings{huggingface:dataset,
- title = {A great new dataset},
- author={huggingface, Inc.
- },
- year={2020}
- }
- """
-
- # TODO: Add description of the dataset here
- # You can copy an official description
- _DESCRIPTION = """\
- This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
- """
-
- # TODO: Add a link to an official homepage for the dataset here
- _HOMEPAGE = ""
-
- # TODO: Add the licence for the dataset here if you can find it
- _LICENSE = ""
-
- # TODO: Add link to the official dataset URLs here
- # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
- # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
- _URLS = {
-     "first_domain": "https://huggingface.co/great-new-dataset-first_domain.zip",
-     "second_domain": "https://huggingface.co/great-new-dataset-second_domain.zip",
- }
-
-
- # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
- class NewDataset(datasets.GeneratorBasedBuilder):
-     """TODO: Short description of my dataset."""
-
-     VERSION = datasets.Version("1.1.0")
-
-     # This is an example of a dataset with multiple configurations.
-     # If you don't want/need to define several sub-sets in your dataset,
-     # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
-
-     # If you need to make complex sub-parts in the datasets with configurable options
-     # You can create your own builder configuration class to store attributes, inheriting from datasets.BuilderConfig
-     # BUILDER_CONFIG_CLASS = MyBuilderConfig
-
-     # You will be able to load one or the other configurations in the following list with
-     # data = datasets.load_dataset('my_dataset', 'first_domain')
-     # data = datasets.load_dataset('my_dataset', 'second_domain')
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(
-             name="first_domain",
-             version=VERSION,
-             description="This part of my dataset covers a first domain",
-         ),
-         datasets.BuilderConfig(
-             name="second_domain",
-             version=VERSION,
-             description="This part of my dataset covers a second domain",
-         ),
-     ]
-
-     DEFAULT_CONFIG_NAME = "first_domain"  # It's not mandatory to have a default configuration. Just use one if it makes sense.
-
-     def _info(self):
-         # TODO: This method specifies the datasets.DatasetInfo object which contains information and typings for the dataset
-         if (
-             self.config.name == "first_domain"
-         ):  # This is the name of the configuration selected in BUILDER_CONFIGS above
-             features = datasets.Features(
-                 {
-                     "sentence": datasets.Value("string"),
-                     "option1": datasets.Value("string"),
-                     "answer": datasets.Value("string"),
-                     # These are the features of your dataset like images, labels ...
-                 }
-             )
-         else:  # This is an example to show how to have different features for "first_domain" and "second_domain"
-             features = datasets.Features(
-                 {
-                     "sentence": datasets.Value("string"),
-                     "option2": datasets.Value("string"),
-                     "second_domain_answer": datasets.Value("string"),
-                     # These are the features of your dataset like images, labels ...
-                 }
-             )
-         return datasets.DatasetInfo(
-             # This is the description that will appear on the datasets page.
-             description=_DESCRIPTION,
-             # This defines the different columns of the dataset and their types
-             features=features,  # Here we define them above because they are different between the two configurations
-             # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
-             # specify them. They'll be used if as_supervised=True in builder.as_dataset.
-             # supervised_keys=("sentence", "label"),
-             # Homepage of the dataset for documentation
-             homepage=_HOMEPAGE,
-             # License for the dataset if available
-             license=_LICENSE,
-             # Citation for the dataset
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
-         # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
-
-         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs
-         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
-         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
-         urls = _URLS[self.config.name]
-         data_dir = dl_manager.download_and_extract(urls)
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={
-                     "filepath": os.path.join(data_dir, "train.jsonl"),
-                     "split": "train",
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={
-                     "filepath": os.path.join(data_dir, "dev.jsonl"),
-                     "split": "dev",
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={
-                     "filepath": os.path.join(data_dir, "test.jsonl"),
-                     "split": "test",
-                 },
-             ),
-         ]
-
-     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
-     def _generate_examples(self, filepath, split):
-         # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
-         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
-         with open(filepath, encoding="utf-8") as f:
-             for key, row in enumerate(f):
-                 data = json.loads(row)
-                 if self.config.name == "first_domain":
-                     # Yields examples as (key, example) tuples
-                     yield key, {
-                         "sentence": data["sentence"],
-                         "option1": data["option1"],
-                         "answer": "" if split == "test" else data["answer"],
-                     }
-                 else:
-                     yield key, {
-                         "sentence": data["sentence"],
-                         "option2": data["option2"],
-                         "second_domain_answer": (
-                             "" if split == "test" else data["second_domain_answer"]
-                         ),
-                     }
processors/days_on_market.ipynb CHANGED
@@ -11,7 +11,6 @@
  "\n",
  "from helpers import (\n",
  "    get_combined_df,\n",
- "    coalesce_columns,\n",
  "    save_final_df_as_jsonl,\n",
  "    handle_slug_column_mappings,\n",
  ")"
@@ -329,8 +328,6 @@
  " ],\n",
  ")\n",
  "\n",
- "combined_df = coalesce_columns(combined_df)\n",
- "\n",
  "combined_df"
  ]
  },
processors/for_sale_listings.ipynb CHANGED
@@ -11,7 +11,6 @@
  "\n",
  "from helpers import (\n",
  "    get_combined_df,\n",
- "    coalesce_columns,\n",
  "    save_final_df_as_jsonl,\n",
  "    handle_slug_column_mappings,\n",
  ")"
@@ -397,8 +396,6 @@
  " ],\n",
  ")\n",
  "\n",
- "combined_df = coalesce_columns(combined_df)\n",
- "\n",
  "combined_df"
  ]
  },
processors/helpers.py CHANGED
@@ -2,6 +2,22 @@ import pandas as pd
  import os
 
 
+ def coalesce_columns(
+     df,
+ ):
+     columns_to_coalesce = [col for col in df.columns if "_" not in col]
+     for index, row in df.iterrows():
+         for col in df.columns:
+             for column_to_coalesce in columns_to_coalesce:
+                 if column_to_coalesce in col and "_" in col:
+                     if not pd.isna(row[col]):
+                         df.at[index, column_to_coalesce] = row[col]
+
+     # remove columns with underscores
+     combined_df = df[columns_to_coalesce]
+     return combined_df
+
+
  def get_combined_df(data_frames, on):
      combined_df = None
      if len(data_frames) > 1:
@@ -19,22 +35,8 @@ def get_combined_df(data_frames, on):
      elif len(data_frames) == 1:
          combined_df = data_frames[0]
 
-     return combined_df
-
-
- def coalesce_columns(
-     df,
- ):
-     columns_to_coalesce = [col for col in df.columns if "_" not in col]
-     for index, row in df.iterrows():
-         for col in df.columns:
-             for column_to_coalesce in columns_to_coalesce:
-                 if column_to_coalesce in col and "_" in col:
-                     if not pd.isna(row[col]):
-                         df.at[index, column_to_coalesce] = row[col]
-
-     # remove columns with underscores
-     combined_df = df[columns_to_coalesce]
+     combined_df = coalesce_columns(combined_df)
+
      return combined_df
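
For reference, a minimal sketch of what coalesce_columns does now that it runs inside get_combined_df: it copies values from merge-suffixed columns (e.g. value_x, value_y) into the underscore-free base column and drops the suffixed columns. The toy DataFrame and column names below are hypothetical; the function body mirrors the definition in the diff above.

import pandas as pd


def coalesce_columns(df):
    # base columns are the ones without an underscore in their name
    columns_to_coalesce = [col for col in df.columns if "_" not in col]
    for index, row in df.iterrows():
        for col in df.columns:
            for column_to_coalesce in columns_to_coalesce:
                if column_to_coalesce in col and "_" in col:
                    if not pd.isna(row[col]):
                        df.at[index, column_to_coalesce] = row[col]
    # keep only the coalesced base columns
    return df[columns_to_coalesce]


# hypothetical merged frame with suffixed duplicates of "value"
df = pd.DataFrame(
    {
        "region": ["a", "b"],
        "value": [None, None],
        "value_x": [1.0, None],
        "value_y": [None, 2.0],
    }
)
print(coalesce_columns(df))  # region: [a, b], value: [1.0, 2.0]

With the call folded into get_combined_df, every processor notebook gets this coalescing for free, which is why the explicit calls are deleted below.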
processors/home_value_forecasts.ipynb CHANGED
@@ -9,7 +9,7 @@
  "import pandas as pd\n",
  "import os\n",
  "\n",
- "from helpers import get_combined_df, coalesce_columns, save_final_df_as_jsonl"
+ "from helpers import get_combined_df, save_final_df_as_jsonl"
  ]
  },
  {
@@ -414,8 +414,6 @@
  " ],\n",
  ")\n",
  "\n",
- "combined_df = coalesce_columns(combined_df)\n",
- "\n",
  "combined_df"
  ]
  },
processors/home_values.ipynb CHANGED
@@ -11,7 +11,6 @@
  "\n",
  "from helpers import (\n",
  "    get_combined_df,\n",
- "    coalesce_columns,\n",
  "    save_final_df_as_jsonl,\n",
  "    handle_slug_column_mappings,\n",
  ")"
@@ -466,8 +465,6 @@
  " ],\n",
  ")\n",
  "\n",
- "combined_df = coalesce_columns(combined_df)\n",
- "\n",
  "combined_df"
  ]
  },
processors/new_construction.ipynb CHANGED
@@ -11,7 +11,6 @@
  "\n",
  "from helpers import (\n",
  "    get_combined_df,\n",
- "    coalesce_columns,\n",
  "    save_final_df_as_jsonl,\n",
  "    handle_slug_column_mappings,\n",
  ")"
@@ -315,8 +314,6 @@
  " ],\n",
  ")\n",
  "\n",
- "combined_df = coalesce_columns(combined_df)\n",
- "\n",
  "combined_df"
  ]
  },
processors/rentals.ipynb CHANGED
@@ -11,7 +11,6 @@
  "\n",
  "from helpers import (\n",
  "    get_combined_df,\n",
- "    coalesce_columns,\n",
  "    save_final_df_as_jsonl,\n",
  "    handle_slug_column_mappings,\n",
  ")"
@@ -438,8 +437,6 @@
  " ],\n",
  ")\n",
  "\n",
- "combined_df = coalesce_columns(combined_df)\n",
- "\n",
  "combined_df"
  ]
  },
processors/sales.ipynb CHANGED
@@ -11,7 +11,6 @@
  "\n",
  "from helpers import (\n",
  "    get_combined_df,\n",
- "    coalesce_columns,\n",
  "    save_final_df_as_jsonl,\n",
  "    handle_slug_column_mappings,\n",
  ")"
@@ -525,8 +524,6 @@
  " ],\n",
  ")\n",
  "\n",
- "combined_df = coalesce_columns(combined_df)\n",
- "\n",
  "combined_df"
  ]
  },