misikoff committed

Commit cf9e214
1 parent: ddb4add

fix: simplify structure of processors through shared functions
.gitignore CHANGED
@@ -1 +1,3 @@
- *.DS_STORE
+ *.DS_STORE
+
+ *__pycache__*
README.md CHANGED
@@ -15,7 +15,7 @@ Updated 2023-02-01
 
  This dataset contains several configs produced based on files available at https://www.zillow.com/research/data/.
 
- supported configs:
+ Supported configs:
  <!-- list each with a short description (1 sentence) -->
  - [`home_values`](#home-values): Zillow Home Value Index (ZHVI) for all homes, mid-tier, bottom-tier, and top-tier homes.
  - [`home_values_forecasts`](#home-values-forecasts): Zillow Home Value Forecast (ZHVF) for all homes, mid-tier, bottom-tier, and top-tier homes.
processed/days_on_market/final.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1cf82e9ce68b4ebf991214a7de3fbc8f25de319da470741761d44d11d5cc89f3
+ size 230154547

processed/for_sale_listings/final.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:38e3f7794b23cfdb27f446d888b6c930078e5fb511311c7d216a248f27c74757
+ size 179627939

processed/home_values/final.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2e50e8888742d20a9cf36f4dc41aeabaf37933a8c90de9825f160d2e5e37a011
+ size 88318760

processed/new_construction/final.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:276b20bd2011faa1fb59f58892ef16b8bbfbb8111a10c7e8d4f433a9226bf3c5
+ size 10903095

processed/rentals/final.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f8881aca35bd30388f8ce14417b5f6edc4db01dca1be18e8a7e467fcb4258dac
+ size 413052557

processed/sales/final.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:99077b13eeb65343b0676dcbc58b673265ab88468abc1fc4a7fc161c40f490d7
+ size 279576767
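Each `final.jsonl` above is tracked with Git LFS, so the repository stores only the three-line pointer shown (spec version, sha256 object id, byte size); `git lfs pull` fetches the real files. A minimal sketch of reading a pointer's fields — `parse_lfs_pointer` is a hypothetical helper, not part of this commit:

```python
from pathlib import Path


def parse_lfs_pointer(path: str) -> dict:
    # A Git LFS pointer file is plain text: one "key value" pair per line,
    # e.g. "oid sha256:1cf8..." and "size 230154547".
    fields = {}
    for line in Path(path).read_text().splitlines():
        key, _, value = line.partition(" ")
        if key:
            fields[key] = value
    return fields


# e.g. parse_lfs_pointer("processed/days_on_market/final.jsonl")["size"] -> "230154547"
```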
processors/days_on_market.ipynb CHANGED
@@ -2,17 +2,19 @@
   "cells": [
   {
   "cell_type": "code",
- "execution_count": 3,
+ "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
   "import pandas as pd\n",
- "import os"
+ "import os\n",
+ "\n",
+ "from helpers import get_combined_df, coalesce_columns, get_df, save_final_df_as_jsonl"
   ]
   },
   {
   "cell_type": "code",
- "execution_count": 5,
+ "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
@@ -25,7 +27,7 @@
   },
   {
   "cell_type": "code",
- "execution_count": 6,
+ "execution_count": 7,
   "metadata": {},
   "outputs": [
   {
@@ -322,7 +324,7 @@
   "[586714 rows x 13 columns]"
   ]
   },
- "execution_count": 6,
+ "execution_count": 7,
   "metadata": {},
   "output_type": "execute_result"
   }
@@ -347,52 +349,6 @@
   "}\n",
   "\n",
   "\n",
- "def get_df(\n",
- "    df, exclude_columns, columns_to_pivot, col_name, smoothed, seasonally_adjusted\n",
- "):\n",
- "    if smoothed:\n",
- "        col_name += \" (Smoothed)\"\n",
- "    if seasonally_adjusted:\n",
- "        col_name += \" (Seasonally Adjusted)\"\n",
- "\n",
- "    df = pd.melt(\n",
- "        df,\n",
- "        id_vars=exclude_columns,\n",
- "        value_vars=columns_to_pivot,\n",
- "        var_name=\"Date\",\n",
- "        value_name=col_name,\n",
- "    )\n",
- "    return df\n",
- "\n",
- "\n",
- "def get_combined_df(data_frames):\n",
- "    combined_df = None\n",
- "    if len(data_frames) > 1:\n",
- "        # iterate over dataframes and merge or concat\n",
- "        combined_df = data_frames[0]\n",
- "        for i in range(1, len(data_frames)):\n",
- "            cur_df = data_frames[i]\n",
- "            combined_df = pd.merge(\n",
- "                combined_df,\n",
- "                cur_df,\n",
- "                on=[\n",
- "                    \"RegionID\",\n",
- "                    \"SizeRank\",\n",
- "                    \"RegionName\",\n",
- "                    \"RegionType\",\n",
- "                    \"StateName\",\n",
- "                    \"Home Type\",\n",
- "                    \"Date\",\n",
- "                ],\n",
- "                how=\"outer\",\n",
- "                suffixes=(\"\", \"_\" + str(i)),\n",
- "            )\n",
- "    elif len(data_frames) == 1:\n",
- "        combined_df = data_frames[0]\n",
- "\n",
- "    return combined_df\n",
- "\n",
- "\n",
   "for filename in os.listdir(FULL_DATA_DIR_PATH):\n",
   "    if filename.endswith(\".csv\"):\n",
   "        # print(\"processing \" + filename)\n",
@@ -412,9 +368,6 @@
   "    # Identify columns to pivot\n",
   "    columns_to_pivot = [col for col in cur_df.columns if col not in exclude_columns]\n",
   "\n",
- "    smoothed = \"_sm_\" in filename\n",
- "    seasonally_adjusted = \"_sa_\" in filename\n",
- "\n",
   "    # iterate over slug column mappings and get df\n",
   "    for slug, col_name in slug_column_mappings.items():\n",
   "        if slug in filename:\n",
@@ -423,35 +376,36 @@
   "                exclude_columns,\n",
   "                columns_to_pivot,\n",
   "                col_name,\n",
- "                smoothed,\n",
- "                seasonally_adjusted,\n",
+ "                filename,\n",
   "            )\n",
   "\n",
   "            data_frames.append(cur_df)\n",
   "            break\n",
   "\n",
   "\n",
- "combined_df = get_combined_df(data_frames)\n",
+ "combined_df = get_combined_df(\n",
+ "    data_frames,\n",
+ "    [\n",
+ "        \"RegionID\",\n",
+ "        \"SizeRank\",\n",
+ "        \"RegionName\",\n",
+ "        \"RegionType\",\n",
+ "        \"StateName\",\n",
+ "        \"Home Type\",\n",
+ "        \"Date\",\n",
+ "    ],\n",
+ ")\n",
   "\n",
   "columns_to_coalesce = slug_column_mappings.values()\n",
- "print(columns_to_coalesce)\n",
- "\n",
- "for index, row in combined_df.iterrows():\n",
- "    for col in combined_df.columns:\n",
- "        for column_to_coalesce in columns_to_coalesce:\n",
- "            if column_to_coalesce in col and \"_\" in col:\n",
- "                if not pd.isna(row[col]):\n",
- "                    combined_df.at[index, column_to_coalesce] = row[col]\n",
   "\n",
- "# remove columns with underscores\n",
- "combined_df = combined_df[[col for col in combined_df.columns if \"_\" not in col]]\n",
+ "combined_df = coalesce_columns(combined_df, columns_to_coalesce)\n",
   "\n",
   "combined_df"
   ]
   },
   {
   "cell_type": "code",
- "execution_count": 7,
+ "execution_count": 8,
   "metadata": {},
   "outputs": [
   {
@@ -741,14 +695,14 @@
   "[586714 rows x 13 columns]"
   ]
   },
- "execution_count": 7,
+ "execution_count": 8,
   "metadata": {},
   "output_type": "execute_result"
   }
   ],
   "source": [
- "final_df = combined_df\n",
- "final_df = final_df.rename(\n",
+ "# Adjust column names\n",
+ "final_df = combined_df.rename(\n",
   "    columns={\n",
   "        \"RegionID\": \"Region ID\",\n",
   "        \"SizeRank\": \"Size Rank\",\n",
@@ -763,14 +717,11 @@
   },
   {
   "cell_type": "code",
- "execution_count": 8,
+ "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
- "if not os.path.exists(FULL_PROCESSED_DIR_PATH):\n",
- "    os.makedirs(FULL_PROCESSED_DIR_PATH)\n",
- "\n",
- "final_df.to_json(FULL_PROCESSED_DIR_PATH + \"final.jsonl\", orient=\"records\", lines=True)"
+ "save_final_df_as_jsonl(FULL_PROCESSED_DIR_PATH, final_df)"
   ]
   }
   ],
processors/for_sale_listings.ipynb CHANGED
@@ -2,17 +2,19 @@
   "cells": [
   {
   "cell_type": "code",
- "execution_count": 2,
+ "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
   "import pandas as pd\n",
- "import os"
+ "import os\n",
+ "\n",
+ "from helpers import get_combined_df, coalesce_columns, get_df, save_final_df_as_jsonl"
   ]
   },
   {
   "cell_type": "code",
- "execution_count": 3,
+ "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
@@ -25,7 +27,7 @@
   },
   {
   "cell_type": "code",
- "execution_count": 5,
+ "execution_count": 3,
   "metadata": {},
   "outputs": [
   {

[output hunks condensed: the cell's DataFrame repr (HTML table and text/plain) is updated — the value columns are reordered to Median Listing Price, Median Listing Price (Smoothed), New Pending (Smoothed), New Listings, New Listings (Smoothed), New Pending; the month-end rows (e.g. 2018-01-31, 2023-12-31) disappear now that monthly files are skipped, weekly rows such as 2018-02-10 and 2023-12-09 take their place, and the row count drops from 693661 to 578653; the result's execution_count changes from 5 to 3.]

@@ -349,6 +351,13 @@
   "    \"Home Type\",\n",
   "]\n",
   "\n",
+ "slug_column_mappings = {\n",
+ "    \"_mlp_\": \"Median Listing Price\",\n",
+ "    \"_new_listings_\": \"New Listings\",\n",
+ "    \"new_pending\": \"New Pending\",\n",
+ "}\n",
+ "\n",
+ "\n",
   "data_frames = []\n",
   "\n",
   "for filename in os.listdir(FULL_DATA_DIR_PATH):\n",
@@ -357,7 +366,7 @@
   "        cur_df = pd.read_csv(os.path.join(FULL_DATA_DIR_PATH, filename))\n",
   "\n",
   "        # ignore monthly data for now since it is redundant\n",
- "        if \"monthly\" in filename:\n",
+ "        if \"month\" in filename:\n",
   "            continue\n",
   "\n",
   "        if \"sfrcondo\" in filename:\n",
@@ -370,84 +379,32 @@
   "    # Identify columns to pivot\n",
   "    columns_to_pivot = [col for col in cur_df.columns if col not in exclude_columns]\n",
   "\n",
- "    smoothed = \"_sm_\" in filename\n",
- "\n",
- "    if \"_mlp_\" in filename:\n",
- "        cur_df = pd.melt(\n",
- "            cur_df,\n",
- "            id_vars=exclude_columns,\n",
- "            value_vars=columns_to_pivot,\n",
- "            var_name=\"Date\",\n",
- "            value_name=(\n",
- "                \"Median Listing Price\"\n",
- "                if not smoothed\n",
- "                else \"Median Listing Price (Smoothed)\"\n",
- "            ),\n",
- "        )\n",
- "        data_frames.append(cur_df)\n",
- "\n",
- "    elif \"_new_listings_\" in filename:\n",
- "        cur_df = pd.melt(\n",
- "            cur_df,\n",
- "            id_vars=exclude_columns,\n",
- "            value_vars=columns_to_pivot,\n",
- "            var_name=\"Date\",\n",
- "            value_name=(\n",
- "                \"New Listings\" if not smoothed else \"New Listings (Smoothed)\"\n",
- "            ),\n",
- "        )\n",
- "        data_frames.append(cur_df)\n",
- "\n",
- "    elif \"new_pending\" in filename:\n",
- "        cur_df = pd.melt(\n",
- "            cur_df,\n",
- "            id_vars=exclude_columns,\n",
- "            value_vars=columns_to_pivot,\n",
- "            var_name=\"Date\",\n",
- "            value_name=\"New Pending\" if not smoothed else \"New Pending (Smoothed)\",\n",
- "        )\n",
- "        data_frames.append(cur_df)\n",
+ "    for slug, col_name in slug_column_mappings.items():\n",
+ "        if slug in filename:\n",
+ "            cur_df = get_df(\n",
+ "                cur_df,\n",
+ "                exclude_columns,\n",
+ "                columns_to_pivot,\n",
+ "                col_name,\n",
+ "                filename,\n",
+ "            )\n",
   "\n",
- "matching_cols = [\n",
- "    \"RegionID\",\n",
- "    \"Date\",\n",
- "    \"SizeRank\",\n",
- "    \"RegionName\",\n",
- "    \"RegionType\",\n",
- "    \"StateName\",\n",
- "    \"Home Type\",\n",
- "]\n",
- "\n",
- "\n",
- "def get_combined_df(data_frames):\n",
- "    combined_df = None\n",
- "    if len(data_frames) > 1:\n",
- "        # iterate over dataframes and merge or concat\n",
- "        combined_df = data_frames[0]\n",
- "        for i in range(1, len(data_frames)):\n",
- "            cur_df = data_frames[i]\n",
- "            combined_df = pd.merge(\n",
- "                combined_df,\n",
- "                cur_df,\n",
- "                on=[\n",
- "                    \"RegionID\",\n",
- "                    \"SizeRank\",\n",
- "                    \"RegionName\",\n",
- "                    \"RegionType\",\n",
- "                    \"StateName\",\n",
- "                    \"Home Type\",\n",
- "                    \"Date\",\n",
- "                ],\n",
- "                suffixes=(\"\", \"_\" + str(i)),\n",
- "                how=\"outer\",\n",
- "            )\n",
- "    elif len(data_frames) == 1:\n",
- "        combined_df = data_frames[0]\n",
- "\n",
- "    return combined_df\n",
- "\n",
- "\n",
- "combined_df = get_combined_df(data_frames)\n",
+ "            data_frames.append(cur_df)\n",
+ "            break\n",
+ "\n",
+ "combined_df = get_combined_df(\n",
+ "    data_frames,\n",
+ "    [\n",
+ "        \"RegionID\",\n",
+ "        \"SizeRank\",\n",
+ "        \"RegionName\",\n",
+ "        \"RegionType\",\n",
+ "        \"StateName\",\n",
+ "        \"Home Type\",\n",
+ "        \"Date\",\n",
+ "    ],\n",
+ ")\n",
   "\n",
   "\n",
   "# iterate over rows of combined df and coalesce column values across columns that start with \"Median Sale Price\"\n",
@@ -460,22 +417,14 @@
   "    \"New Pending\",\n",
   "]\n",
   "\n",
- "for index, row in combined_df.iterrows():\n",
- "    for col in combined_df.columns:\n",
- "        for column_to_coalesce in columns_to_coalesce:\n",
- "            if column_to_coalesce in col and \"_\" in col:\n",
- "                if not pd.isna(row[col]):\n",
- "                    combined_df.at[index, column_to_coalesce] = row[col]\n",
- "\n",
- "# remove columns with underscores\n",
- "combined_df = combined_df[[col for col in combined_df.columns if \"_\" not in col]]\n",
+ "combined_df = coalesce_columns(combined_df, columns_to_coalesce)\n",
   "\n",
   "combined_df"
   ]
   },
   {
   "cell_type": "code",
- "execution_count": 6,
+ "execution_count": 4,
   "metadata": {},
   "outputs": [
   {

[output hunks condensed: the second cell's DataFrame repr changes the same way — reordered value columns, monthly rows removed, and 693661 → 578653 rows — and its execution_count changes from 6 to 4.]

   ],
   "source": [
- "final_df = combined_df\n",
- "final_df = final_df.rename(\n",
+ "# Adjust column names\n",
+ "final_df = combined_df.rename(\n",
   "    columns={\n",
   "        \"RegionID\": \"Region ID\",\n",
   "        \"SizeRank\": \"Size Rank\",\n",
@@ -774,14 +723,11 @@
   },
   {
   "cell_type": "code",
- "execution_count": 7,
+ "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
- "if not os.path.exists(FULL_PROCESSED_DIR_PATH):\n",
- "    os.makedirs(FULL_PROCESSED_DIR_PATH)\n",
- "\n",
- "final_df.to_json(FULL_PROCESSED_DIR_PATH + \"final.jsonl\", orient=\"records\", lines=True)"
+ "save_final_df_as_jsonl(FULL_PROCESSED_DIR_PATH, final_df)"
   ]
   }
   ],
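The `slug_column_mappings` dict introduced in this notebook replaces three nearly identical `pd.melt` branches: the first slug found in a filename picks the value-column name, and `get_df` derives the "(Smoothed)" suffix from the filename's `_sm_` marker. A small illustration of that matching logic — `column_for` and the sample filename are made up for the example:

```python
slug_column_mappings = {
    "_mlp_": "Median Listing Price",
    "_new_listings_": "New Listings",
    "new_pending": "New Pending",
}


def column_for(filename):
    # First matching slug wins, mirroring the notebook's `break`.
    for slug, col_name in slug_column_mappings.items():
        if slug not in filename:
            continue
        if "_sm_" in filename:
            col_name += " (Smoothed)"
        return col_name
    return None


# Hypothetical filename in Zillow's naming scheme:
print(column_for("Metro_mlp_uc_sfrcondo_sm_week.csv"))  # Median Listing Price (Smoothed)
```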
processors/helpers.py ADDED
@@ -0,0 +1,69 @@
+ import pandas as pd
+ import os
+
+
+ def get_combined_df(data_frames, on):
+     combined_df = None
+     if len(data_frames) > 1:
+         # iterate over dataframes and merge or concat
+         combined_df = data_frames[0]
+         for i in range(1, len(data_frames)):
+             cur_df = data_frames[i]
+             combined_df = pd.merge(
+                 combined_df,
+                 cur_df,
+                 on=on,
+                 how="outer",
+                 suffixes=("", "_" + str(i)),
+             )
+     elif len(data_frames) == 1:
+         combined_df = data_frames[0]
+
+     return combined_df
+
+
+ def coalesce_columns(df, columns_to_coalesce):
+     for index, row in df.iterrows():
+         for col in df.columns:
+             for column_to_coalesce in columns_to_coalesce:
+                 if column_to_coalesce in col and "_" in col:
+                     if not pd.isna(row[col]):
+                         df.at[index, column_to_coalesce] = row[col]
+
+     # remove columns with underscores
+     combined_df = df[[col for col in df.columns if "_" not in col]]
+     return combined_df
+
+
+ def get_df(
+     df,
+     exclude_columns,
+     columns_to_pivot,
+     col_name,
+     filename,
+ ):
+     smoothed = "_sm_" in filename
+     seasonally_adjusted = "_sa_" in filename
+
+     if smoothed:
+         col_name += " (Smoothed)"
+     if seasonally_adjusted:
+         col_name += " (Seasonally Adjusted)"
+
+     df = pd.melt(
+         df,
+         id_vars=exclude_columns,
+         value_vars=columns_to_pivot,
+         var_name="Date",
+         value_name=col_name,
+     )
+     return df
+
+
+ def save_final_df_as_jsonl(FULL_PROCESSED_DIR_PATH, final_df):
+     if not os.path.exists(FULL_PROCESSED_DIR_PATH):
+         os.makedirs(FULL_PROCESSED_DIR_PATH)
+
+     final_df.to_json(
+         FULL_PROCESSED_DIR_PATH + "final.jsonl", orient="records", lines=True
+     )
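With `helpers.py` in place, every processor follows the same shape: melt each CSV with `get_df`, outer-merge the frames with `get_combined_df`, collapse the suffixed duplicate columns with `coalesce_columns`, and write JSONL with `save_final_df_as_jsonl`. A condensed sketch of that pipeline — the paths and single-entry mapping are illustrative, mirroring the notebook code above:

```python
import os

import pandas as pd

from helpers import coalesce_columns, get_combined_df, get_df, save_final_df_as_jsonl

# Illustrative values; each notebook defines its own paths and slug mapping.
FULL_DATA_DIR_PATH = "../data/for_sale_listings/"
FULL_PROCESSED_DIR_PATH = "../processed/for_sale_listings/"
exclude_columns = ["RegionID", "SizeRank", "RegionName", "RegionType", "StateName", "Home Type"]
slug_column_mappings = {"_mlp_": "Median Listing Price"}

data_frames = []
for filename in os.listdir(FULL_DATA_DIR_PATH):
    if not filename.endswith(".csv"):
        continue
    cur_df = pd.read_csv(os.path.join(FULL_DATA_DIR_PATH, filename))
    columns_to_pivot = [col for col in cur_df.columns if col not in exclude_columns]
    for slug, col_name in slug_column_mappings.items():
        if slug in filename:
            # get_df melts the wide date columns into a Date column and
            # appends "(Smoothed)" / "(Seasonally Adjusted)" to the value
            # column when the filename carries _sm_ / _sa_ markers
            data_frames.append(
                get_df(cur_df, exclude_columns, columns_to_pivot, col_name, filename)
            )
            break

combined_df = get_combined_df(data_frames, exclude_columns + ["Date"])
final_df = coalesce_columns(combined_df, slug_column_mappings.values())
save_final_df_as_jsonl(FULL_PROCESSED_DIR_PATH, final_df)
```

One caveat worth noting: `coalesce_columns` walks the frame with `iterrows()`, which is slow in Python on frames of this size; if the data grows, a vectorized pass (for example combining each suffixed column into its base column with `Series.combine_first`) would do the same consolidation much faster.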
processors/home_value_forecasts.ipynb CHANGED
@@ -2,17 +2,19 @@
   "cells": [
   {
   "cell_type": "code",
- "execution_count": 1,
+ "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
   "import pandas as pd\n",
- "import os"
+ "import os\n",
+ "\n",
+ "from helpers import save_final_df_as_jsonl"
   ]
   },
   {
   "cell_type": "code",
- "execution_count": 2,
+ "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
@@ -25,7 +27,7 @@
   },
   {
   "cell_type": "code",
- "execution_count": 3,
+ "execution_count": 6,
   "metadata": {},
   "outputs": [
   {
@@ -361,7 +363,7 @@
   "[21062 rows x 16 columns]"
   ]
   },
- "execution_count": 3,
+ "execution_count": 6,
   "metadata": {},
   "output_type": "execute_result"
   }
@@ -418,7 +420,7 @@
   },
   {
   "cell_type": "code",
- "execution_count": 7,
+ "execution_count": 8,
   "metadata": {},
   "outputs": [
   {
@@ -732,7 +734,7 @@
   "[21062 rows x 15 columns]"
   ]
   },
- "execution_count": 7,
+ "execution_count": 8,
   "metadata": {},
   "output_type": "execute_result"
   }
@@ -783,14 +785,11 @@
   },
   {
   "cell_type": "code",
- "execution_count": 8,
+ "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
- "if not os.path.exists(FULL_PROCESSED_DIR_PATH):\n",
- "    os.makedirs(FULL_PROCESSED_DIR_PATH)\n",
- "\n",
- "final_df.to_json(FULL_PROCESSED_DIR_PATH + \"final.jsonl\", orient=\"records\", lines=True)"
+ "save_final_df_as_jsonl(FULL_PROCESSED_DIR_PATH, final_df)"
   ]
   }
   ],
processors/home_values.ipynb CHANGED
@@ -2,17 +2,19 @@
2
  "cells": [
3
  {
4
  "cell_type": "code",
5
- "execution_count": 17,
6
  "metadata": {},
7
  "outputs": [],
8
  "source": [
9
  "import pandas as pd\n",
10
- "import os"
 
 
11
  ]
12
  },
13
  {
14
  "cell_type": "code",
15
- "execution_count": 18,
16
  "metadata": {},
17
  "outputs": [],
18
  "source": [
@@ -25,7 +27,7 @@
25
  },
26
  {
27
  "cell_type": "code",
28
- "execution_count": 19,
29
  "metadata": {},
30
  "outputs": [
31
  {
@@ -65,7 +67,6 @@
65
  "processing Zip_zhvi_bdrmcnt_3_uc_sfrcondo_tier_0.33_0.67_sm_sa_month.csv\n",
66
  "processing Neighborhood_zhvi_bdrmcnt_1_uc_sfrcondo_tier_0.33_0.67_sm_sa_month.csv\n",
67
  "processing City_zhvi_bdrmcnt_3_uc_sfrcondo_tier_0.33_0.67_sm_sa_month.csv\n",
68
- "processing County_zhvi_uc_sfr_tier_0.33_0.67_sm_sa_month (1).csv\n",
69
  "processing County_zhvi_bdrmcnt_1_uc_sfrcondo_tier_0.33_0.67_sm_sa_month.csv\n",
70
  "processing Neighborhood_zhvi_uc_condo_tier_0.33_0.67_sm_sa_month.csv\n",
71
  "processing Metro_zhvi_uc_sfrcondo_tier_0.33_0.67_month.csv\n",
@@ -88,25 +89,7 @@
88
  "processing Neighborhood_zhvi_bdrmcnt_2_uc_sfrcondo_tier_0.33_0.67_sm_sa_month.csv\n",
89
  "processing County_zhvi_uc_sfrcondo_tier_0.33_0.67_sm_sa_month.csv\n",
90
  "processing County_zhvi_bdrmcnt_2_uc_sfrcondo_tier_0.33_0.67_sm_sa_month.csv\n",
91
- "processing Metro_zhvi_bdrmcnt_4_uc_sfrcondo_tier_0.33_0.67_sm_sa_month.csv\n",
92
- "1\n",
93
- "10\n",
94
- "2\n",
95
- "10\n",
96
- "3\n",
97
- "10\n",
98
- "4\n",
99
- "10\n",
100
- "5\n",
101
- "10\n",
102
- "6\n",
103
- "10\n",
104
- "7\n",
105
- "10\n",
106
- "8\n",
107
- "10\n",
108
- "9\n",
109
- "10\n"
110
  ]
111
  },
112
  {
@@ -547,7 +530,7 @@
547
  "[117912 rows x 18 columns]"
548
  ]
549
  },
550
- "execution_count": 19,
551
  "metadata": {},
552
  "output_type": "execute_result"
553
  }
@@ -731,56 +714,27 @@
731
  " data_frames.append(cur_df)\n",
732
  "\n",
733
  "\n",
734
- "def get_combined_df(data_frames):\n",
735
- " combined_df = None\n",
736
- " if len(data_frames) > 1:\n",
737
- " # iterate over dataframes and merge or concat\n",
738
- " combined_df = data_frames[0]\n",
739
- " for i in range(1, len(data_frames)):\n",
740
- " print(i)\n",
741
- " print(len(data_frames))\n",
742
- " cur_df = data_frames[i]\n",
743
- " combined_df = pd.merge(\n",
744
- " combined_df,\n",
745
- " cur_df,\n",
746
- " on=[\n",
747
- " \"RegionID\",\n",
748
- " \"SizeRank\",\n",
749
- " \"RegionName\",\n",
750
- " \"RegionType\",\n",
751
- " \"StateName\",\n",
752
- " \"Bedroom Count\",\n",
753
- " \"Home Type\",\n",
754
- " \"Date\",\n",
755
- " ],\n",
756
- " how=\"outer\",\n",
757
- " suffixes=(\"\", \"_\" + str(i)),\n",
758
- " )\n",
759
- " elif len(data_frames) == 1:\n",
760
- " combined_df = data_frames[0]\n",
761
- "\n",
762
- " return combined_df\n",
763
- "\n",
764
- "\n",
765
- "combined_df = get_combined_df(data_frames)\n",
766
  "combined_df"
767
  ]
768
  },
769
  {
770
  "cell_type": "code",
771
- "execution_count": 20,
772
  "metadata": {},
773
  "outputs": [
774
- {
775
- "name": "stdout",
776
- "output_type": "stream",
777
- "text": [
778
- "ZHVI\n",
779
- "Mid Tier ZHVI\n",
780
- "Bottom Tier ZHVI\n",
781
- "Top Tier ZHVI\n"
782
- ]
783
- },
784
  {
785
  "data": {
786
  "text/html": [
@@ -1081,33 +1035,22 @@
1081
  "[117912 rows x 13 columns]"
1082
  ]
1083
  },
1084
- "execution_count": 20,
1085
  "metadata": {},
1086
  "output_type": "execute_result"
1087
  }
1088
  ],
1089
  "source": [
1090
- "# iterate over rows of combined df and coalesce column values across columns that start with \"Median Sale Price\"\n",
1091
  "columns_to_coalesce = [\"ZHVI\", \"Mid Tier ZHVI\", \"Bottom Tier ZHVI\", \"Top Tier ZHVI\"]\n",
1092
  "\n",
1093
- "for column_to_coalesce in columns_to_coalesce:\n",
1094
- " print(column_to_coalesce)\n",
1095
- " for index, row in combined_df.iterrows():\n",
1096
- " for col in combined_df.columns:\n",
1097
- " if column_to_coalesce in col and \"_\" in col:\n",
1098
- " if not pd.isna(row[col]):\n",
1099
- " combined_df.at[index, column_to_coalesce] = row[col]\n",
1100
- "\n",
1101
- "# remove columns with underscores\n",
1102
- "combined_df = combined_df[[col for col in combined_df.columns if \"_\" not in col]]\n",
1103
- "\n",
1104
  "\n",
1105
  "combined_df"
1106
  ]
1107
  },
1108
  {
1109
  "cell_type": "code",
1110
- "execution_count": 21,
1111
  "metadata": {},
1112
  "outputs": [
1113
  {
@@ -1140,10 +1083,15 @@
1140
  " <th>Home Type</th>\n",
1141
  " <th>Date</th>\n",
1142
  " <th>Mid Tier ZHVI (Smoothed) (Seasonally Adjusted)</th>\n",
 
 
1143
  " <th>Bottom Tier ZHVI (Smoothed) (Seasonally Adjusted)</th>\n",
 
 
 
1144
  " <th>Top Tier ZHVI (Smoothed) (Seasonally Adjusted)</th>\n",
1145
- " <th>ZHVI</th>\n",
1146
- " <th>Mid Tier ZHVI</th>\n",
1147
  " </tr>\n",
1148
  " </thead>\n",
1149
  " <tbody>\n",
@@ -1160,7 +1108,12 @@
1160
  " <td>NaN</td>\n",
1161
  " <td>NaN</td>\n",
1162
  " <td>NaN</td>\n",
1163
- " <td>81310.639504</td>\n",
 
 
 
 
 
1164
  " <td>81310.639504</td>\n",
1165
  " </tr>\n",
1166
  " <tr>\n",
@@ -1176,7 +1129,12 @@
1176
  " <td>NaN</td>\n",
1177
  " <td>NaN</td>\n",
1178
  " <td>NaN</td>\n",
1179
- " <td>80419.761984</td>\n",
 
 
 
 
 
1180
  " <td>80419.761984</td>\n",
1181
  " </tr>\n",
1182
  " <tr>\n",
@@ -1192,7 +1150,12 @@
1192
  " <td>NaN</td>\n",
1193
  " <td>NaN</td>\n",
1194
  " <td>NaN</td>\n",
1195
- " <td>80480.449461</td>\n",
 
 
 
 
 
1196
  " <td>80480.449461</td>\n",
1197
  " </tr>\n",
1198
  " <tr>\n",
@@ -1208,7 +1171,12 @@
1208
  " <td>NaN</td>\n",
1209
  " <td>NaN</td>\n",
1210
  " <td>NaN</td>\n",
1211
- " <td>79799.206525</td>\n",
 
 
 
 
 
1212
  " <td>79799.206525</td>\n",
1213
  " </tr>\n",
1214
  " <tr>\n",
@@ -1224,7 +1192,12 @@
1224
  " <td>NaN</td>\n",
1225
  " <td>NaN</td>\n",
1226
  " <td>NaN</td>\n",
1227
- " <td>79666.469861</td>\n",
 
 
 
 
 
1228
  " <td>79666.469861</td>\n",
1229
  " </tr>\n",
1230
  " <tr>\n",
@@ -1242,6 +1215,11 @@
1242
  " <td>...</td>\n",
1243
  " <td>...</td>\n",
1244
  " <td>...</td>\n",
 
 
 
 
 
1245
  " </tr>\n",
1246
  " <tr>\n",
1247
  " <th>117907</th>\n",
@@ -1256,8 +1234,13 @@
1256
  " <td>NaN</td>\n",
1257
  " <td>NaN</td>\n",
1258
  " <td>NaN</td>\n",
 
 
1259
  " <td>486974.735908</td>\n",
1260
- " <td>486974.735908</td>\n",
 
 
 
1261
  " </tr>\n",
1262
  " <tr>\n",
1263
  " <th>117908</th>\n",
@@ -1272,8 +1255,13 @@
1272
  " <td>NaN</td>\n",
1273
  " <td>NaN</td>\n",
1274
  " <td>NaN</td>\n",
 
 
1275
  " <td>485847.539614</td>\n",
1276
- " <td>485847.539614</td>\n",
 
 
 
1277
  " </tr>\n",
1278
  " <tr>\n",
1279
  " <th>117909</th>\n",
@@ -1288,8 +1276,13 @@
1288
  " <td>NaN</td>\n",
1289
  " <td>NaN</td>\n",
1290
  " <td>NaN</td>\n",
 
 
1291
  " <td>484223.885775</td>\n",
1292
- " <td>484223.885775</td>\n",
 
 
 
1293
  " </tr>\n",
1294
  " <tr>\n",
1295
  " <th>117910</th>\n",
@@ -1304,8 +1297,13 @@
1304
  " <td>NaN</td>\n",
1305
  " <td>NaN</td>\n",
1306
  " <td>NaN</td>\n",
 
 
1307
  " <td>481522.403338</td>\n",
1308
- " <td>481522.403338</td>\n",
 
 
 
1309
  " </tr>\n",
1310
  " <tr>\n",
1311
  " <th>117911</th>\n",
@@ -1320,12 +1318,17 @@
1320
  " <td>NaN</td>\n",
1321
  " <td>NaN</td>\n",
1322
  " <td>NaN</td>\n",
 
 
1323
  " <td>481181.718200</td>\n",
1324
- " <td>481181.718200</td>\n",
 
 
 
1325
  " </tr>\n",
1326
  " </tbody>\n",
1327
  "</table>\n",
1328
- "<p>117912 rows × 13 columns</p>\n",
1329
  "</div>"
1330
  ],
1331
  "text/plain": [
@@ -1368,6 +1371,32 @@
1368
  "117910 NaN \n",
1369
  "117911 NaN \n",
1370
  "\n",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1371
  " Bottom Tier ZHVI (Smoothed) (Seasonally Adjusted) \\\n",
1372
  "0 NaN \n",
1373
  "1 NaN \n",
@@ -1381,36 +1410,88 @@
1381
  "117910 NaN \n",
1382
  "117911 NaN \n",
1383
  "\n",
1384
- " Top Tier ZHVI (Smoothed) (Seasonally Adjusted) ZHVI \\\n",
1385
- "0 NaN 81310.639504 \n",
1386
- "1 NaN 80419.761984 \n",
1387
- "2 NaN 80480.449461 \n",
1388
- "3 NaN 79799.206525 \n",
1389
- "4 NaN 79666.469861 \n",
1390
- "... ... ... \n",
1391
- "117907 NaN 486974.735908 \n",
1392
- "117908 NaN 485847.539614 \n",
1393
- "117909 NaN 484223.885775 \n",
1394
- "117910 NaN 481522.403338 \n",
1395
- "117911 NaN 481181.718200 \n",
1396
  "\n",
1397
- " Mid Tier ZHVI \n",
1398
- "0 81310.639504 \n",
1399
- "1 80419.761984 \n",
1400
- "2 80480.449461 \n",
1401
- "3 79799.206525 \n",
1402
- "4 79666.469861 \n",
1403
- "... ... \n",
1404
- "117907 486974.735908 \n",
1405
- "117908 485847.539614 \n",
1406
- "117909 484223.885775 \n",
1407
- "117910 481522.403338 \n",
1408
- "117911 481181.718200 \n",
1409
  "\n",
1410
- "[117912 rows x 13 columns]"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1411
  ]
1412
  },
1413
- "execution_count": 21,
1414
  "metadata": {},
1415
  "output_type": "execute_result"
1416
  }
@@ -1441,7 +1522,7 @@
1441
  },
1442
  {
1443
  "cell_type": "code",
1444
- "execution_count": 22,
1445
  "metadata": {},
1446
  "outputs": [
1447
  {
@@ -1474,10 +1555,15 @@
1474
  " <th>Home Type</th>\n",
1475
  " <th>Date</th>\n",
1476
  " <th>Mid Tier ZHVI (Smoothed) (Seasonally Adjusted)</th>\n",
 
 
1477
  " <th>Bottom Tier ZHVI (Smoothed) (Seasonally Adjusted)</th>\n",
 
 
 
1478
  " <th>Top Tier ZHVI (Smoothed) (Seasonally Adjusted)</th>\n",
1479
- " <th>ZHVI</th>\n",
1480
- " <th>Mid Tier ZHVI</th>\n",
1481
  " </tr>\n",
1482
  " </thead>\n",
1483
  " <tbody>\n",
@@ -1494,7 +1580,12 @@
1494
  " <td>NaN</td>\n",
1495
  " <td>NaN</td>\n",
1496
  " <td>NaN</td>\n",
1497
- " <td>81310.639504</td>\n",
 
 
 
 
 
1498
  " <td>81310.639504</td>\n",
1499
  " </tr>\n",
1500
  " <tr>\n",
@@ -1510,7 +1601,12 @@
1510
  " <td>NaN</td>\n",
1511
  " <td>NaN</td>\n",
1512
  " <td>NaN</td>\n",
1513
- " <td>80419.761984</td>\n",
 
 
 
 
 
1514
  " <td>80419.761984</td>\n",
1515
  " </tr>\n",
1516
  " <tr>\n",
@@ -1526,7 +1622,12 @@
1526
  " <td>NaN</td>\n",
1527
  " <td>NaN</td>\n",
1528
  " <td>NaN</td>\n",
1529
- " <td>80480.449461</td>\n",
 
 
 
 
 
1530
  " <td>80480.449461</td>\n",
1531
  " </tr>\n",
1532
  " <tr>\n",
@@ -1542,7 +1643,12 @@
1542
  " <td>NaN</td>\n",
1543
  " <td>NaN</td>\n",
1544
  " <td>NaN</td>\n",
1545
- " <td>79799.206525</td>\n",
 
 
 
 
 
1546
  " <td>79799.206525</td>\n",
1547
  " </tr>\n",
1548
  " <tr>\n",
@@ -1558,7 +1664,12 @@
1558
  " <td>NaN</td>\n",
1559
  " <td>NaN</td>\n",
1560
  " <td>NaN</td>\n",
1561
- " <td>79666.469861</td>\n",
 
 
 
 
 
1562
  " <td>79666.469861</td>\n",
1563
  " </tr>\n",
1564
  " <tr>\n",
@@ -1576,6 +1687,11 @@
1576
  " <td>...</td>\n",
1577
  " <td>...</td>\n",
1578
  " <td>...</td>\n",
 
 
 
 
 
1579
  " </tr>\n",
1580
  " <tr>\n",
1581
  " <th>117907</th>\n",
@@ -1590,8 +1706,13 @@
1590
  " <td>NaN</td>\n",
1591
  " <td>NaN</td>\n",
1592
  " <td>NaN</td>\n",
1593
  " <td>486974.735908</td>\n",
1594
- " <td>486974.735908</td>\n",
1595
  " </tr>\n",
1596
  " <tr>\n",
1597
  " <th>117908</th>\n",
@@ -1606,8 +1727,13 @@
1606
  " <td>NaN</td>\n",
1607
  " <td>NaN</td>\n",
1608
  " <td>NaN</td>\n",
1609
  " <td>485847.539614</td>\n",
1610
- " <td>485847.539614</td>\n",
1611
  " </tr>\n",
1612
  " <tr>\n",
1613
  " <th>117909</th>\n",
@@ -1622,8 +1748,13 @@
1622
  " <td>NaN</td>\n",
1623
  " <td>NaN</td>\n",
1624
  " <td>NaN</td>\n",
1625
  " <td>484223.885775</td>\n",
1626
- " <td>484223.885775</td>\n",
1627
  " </tr>\n",
1628
  " <tr>\n",
1629
  " <th>117910</th>\n",
@@ -1638,8 +1769,13 @@
1638
  " <td>NaN</td>\n",
1639
  " <td>NaN</td>\n",
1640
  " <td>NaN</td>\n",
1641
  " <td>481522.403338</td>\n",
1642
- " <td>481522.403338</td>\n",
1643
  " </tr>\n",
1644
  " <tr>\n",
1645
  " <th>117911</th>\n",
@@ -1654,12 +1790,17 @@
1654
  " <td>NaN</td>\n",
1655
  " <td>NaN</td>\n",
1656
  " <td>NaN</td>\n",
1657
  " <td>481181.718200</td>\n",
1658
- " <td>481181.718200</td>\n",
1659
  " </tr>\n",
1660
  " </tbody>\n",
1661
  "</table>\n",
1662
- "<p>117912 rows × 13 columns</p>\n",
1663
  "</div>"
1664
  ],
1665
  "text/plain": [
@@ -1702,6 +1843,32 @@
1702
  "117910 NaN \n",
1703
  "117911 NaN \n",
1704
  "\n",
1705
  " Bottom Tier ZHVI (Smoothed) (Seasonally Adjusted) \\\n",
1706
  "0 NaN \n",
1707
  "1 NaN \n",
@@ -1715,36 +1882,88 @@
1715
  "117910 NaN \n",
1716
  "117911 NaN \n",
1717
  "\n",
1718
- " Top Tier ZHVI (Smoothed) (Seasonally Adjusted) ZHVI \\\n",
1719
- "0 NaN 81310.639504 \n",
1720
- "1 NaN 80419.761984 \n",
1721
- "2 NaN 80480.449461 \n",
1722
- "3 NaN 79799.206525 \n",
1723
- "4 NaN 79666.469861 \n",
1724
- "... ... ... \n",
1725
- "117907 NaN 486974.735908 \n",
1726
- "117908 NaN 485847.539614 \n",
1727
- "117909 NaN 484223.885775 \n",
1728
- "117910 NaN 481522.403338 \n",
1729
- "117911 NaN 481181.718200 \n",
1730
  "\n",
1731
- " Mid Tier ZHVI \n",
1732
- "0 81310.639504 \n",
1733
- "1 80419.761984 \n",
1734
- "2 80480.449461 \n",
1735
- "3 79799.206525 \n",
1736
- "4 79666.469861 \n",
1737
- "... ... \n",
1738
- "117907 486974.735908 \n",
1739
- "117908 485847.539614 \n",
1740
- "117909 484223.885775 \n",
1741
- "117910 481522.403338 \n",
1742
- "117911 481181.718200 \n",
1743
  "\n",
1744
- "[117912 rows x 13 columns]"
1745
  ]
1746
  },
1747
- "execution_count": 22,
1748
  "metadata": {},
1749
  "output_type": "execute_result"
1750
  }
@@ -1767,14 +1986,11 @@
1767
  },
1768
  {
1769
  "cell_type": "code",
1770
- "execution_count": 23,
1771
  "metadata": {},
1772
  "outputs": [],
1773
  "source": [
1774
- "if not os.path.exists(FULL_PROCESSED_DIR_PATH):\n",
1775
- " os.makedirs(FULL_PROCESSED_DIR_PATH)\n",
1776
- "\n",
1777
- "final_df.to_json(FULL_PROCESSED_DIR_PATH + \"final.jsonl\", orient=\"records\", lines=True)"
1778
  ]
1779
  }
1780
  ],
 
2
  "cells": [
3
  {
4
  "cell_type": "code",
5
+ "execution_count": 8,
6
  "metadata": {},
7
  "outputs": [],
8
  "source": [
9
  "import pandas as pd\n",
10
+ "import os\n",
11
+ "\n",
12
+ "from helpers import get_combined_df, coalesce_columns, get_df, save_final_df_as_jsonl"
13
  ]
14
  },
15
  {
16
  "cell_type": "code",
17
+ "execution_count": 9,
18
  "metadata": {},
19
  "outputs": [],
20
  "source": [
 
27
  },
28
  {
29
  "cell_type": "code",
30
+ "execution_count": 10,
31
  "metadata": {},
32
  "outputs": [
33
  {
 
67
  "processing Zip_zhvi_bdrmcnt_3_uc_sfrcondo_tier_0.33_0.67_sm_sa_month.csv\n",
68
  "processing Neighborhood_zhvi_bdrmcnt_1_uc_sfrcondo_tier_0.33_0.67_sm_sa_month.csv\n",
69
  "processing City_zhvi_bdrmcnt_3_uc_sfrcondo_tier_0.33_0.67_sm_sa_month.csv\n",
 
70
  "processing County_zhvi_bdrmcnt_1_uc_sfrcondo_tier_0.33_0.67_sm_sa_month.csv\n",
71
  "processing Neighborhood_zhvi_uc_condo_tier_0.33_0.67_sm_sa_month.csv\n",
72
  "processing Metro_zhvi_uc_sfrcondo_tier_0.33_0.67_month.csv\n",
 
89
  "processing Neighborhood_zhvi_bdrmcnt_2_uc_sfrcondo_tier_0.33_0.67_sm_sa_month.csv\n",
90
  "processing County_zhvi_uc_sfrcondo_tier_0.33_0.67_sm_sa_month.csv\n",
91
  "processing County_zhvi_bdrmcnt_2_uc_sfrcondo_tier_0.33_0.67_sm_sa_month.csv\n",
92
+ "processing Metro_zhvi_bdrmcnt_4_uc_sfrcondo_tier_0.33_0.67_sm_sa_month.csv\n"
93
  ]
94
  },
95
  {
 
530
  "[117912 rows x 18 columns]"
531
  ]
532
  },
533
+ "execution_count": 10,
534
  "metadata": {},
535
  "output_type": "execute_result"
536
  }
 
714
  " data_frames.append(cur_df)\n",
715
  "\n",
716
  "\n",
717
+ "combined_df = get_combined_df(\n",
718
+ " data_frames,\n",
719
+ " [\n",
720
+ " \"RegionID\",\n",
721
+ " \"SizeRank\",\n",
722
+ " \"RegionName\",\n",
723
+ " \"RegionType\",\n",
724
+ " \"StateName\",\n",
725
+ " \"Bedroom Count\",\n",
726
+ " \"Home Type\",\n",
727
+ " \"Date\",\n",
728
+ " ],\n",
729
+ ")\n",
730
  "combined_df"
731
  ]
732
  },
733
  {
734
  "cell_type": "code",
735
+ "execution_count": 4,
736
  "metadata": {},
737
  "outputs": [
738
  {
739
  "data": {
740
  "text/html": [
 
1035
  "[117912 rows x 13 columns]"
1036
  ]
1037
  },
1038
+ "execution_count": 4,
1039
  "metadata": {},
1040
  "output_type": "execute_result"
1041
  }
1042
  ],
1043
  "source": [
 
1044
  "columns_to_coalesce = [\"ZHVI\", \"Mid Tier ZHVI\", \"Bottom Tier ZHVI\", \"Top Tier ZHVI\"]\n",
1045
  "\n",
1046
+ "combined_df = coalesce_columns(combined_df, columns_to_coalesce)\n",
1047
  "\n",
1048
  "combined_df"
1049
  ]
1050
  },
1051
  {
1052
  "cell_type": "code",
1053
+ "execution_count": 11,
1054
  "metadata": {},
1055
  "outputs": [
1056
  {
 
1083
  " <th>Home Type</th>\n",
1084
  " <th>Date</th>\n",
1085
  " <th>Mid Tier ZHVI (Smoothed) (Seasonally Adjusted)</th>\n",
1086
+ " <th>Mid Tier ZHVI (Smoothed) (Seasonally Adjusted)_1</th>\n",
1087
+ " <th>Mid Tier ZHVI (Smoothed) (Seasonally Adjusted)_2</th>\n",
1088
  " <th>Bottom Tier ZHVI (Smoothed) (Seasonally Adjusted)</th>\n",
1089
+ " <th>Mid Tier ZHVI (Smoothed) (Seasonally Adjusted)_4</th>\n",
1090
+ " <th>Mid Tier ZHVI (Smoothed) (Seasonally Adjusted)_5</th>\n",
1091
+ " <th>Mid Tier ZHVI (Smoothed) (Seasonally Adjusted)_6</th>\n",
1092
  " <th>Top Tier ZHVI (Smoothed) (Seasonally Adjusted)</th>\n",
1093
+ " <th>Mid Tier ZHVI (Smoothed) (Seasonally Adjusted)_8</th>\n",
1094
+ " <th>Mid Tier ZHVI (Smoothed) (Seasonally Adjusted)_9</th>\n",
1095
  " </tr>\n",
1096
  " </thead>\n",
1097
  " <tbody>\n",
 
1108
  " <td>NaN</td>\n",
1109
  " <td>NaN</td>\n",
1110
  " <td>NaN</td>\n",
1111
+ " <td>NaN</td>\n",
1112
+ " <td>NaN</td>\n",
1113
+ " <td>NaN</td>\n",
1114
+ " <td>NaN</td>\n",
1115
+ " <td>NaN</td>\n",
1116
+ " <td>NaN</td>\n",
1117
  " <td>81310.639504</td>\n",
1118
  " </tr>\n",
1119
  " <tr>\n",
 
1129
  " <td>NaN</td>\n",
1130
  " <td>NaN</td>\n",
1131
  " <td>NaN</td>\n",
1132
+ " <td>NaN</td>\n",
1133
+ " <td>NaN</td>\n",
1134
+ " <td>NaN</td>\n",
1135
+ " <td>NaN</td>\n",
1136
+ " <td>NaN</td>\n",
1137
+ " <td>NaN</td>\n",
1138
  " <td>80419.761984</td>\n",
1139
  " </tr>\n",
1140
  " <tr>\n",
 
1150
  " <td>NaN</td>\n",
1151
  " <td>NaN</td>\n",
1152
  " <td>NaN</td>\n",
1153
+ " <td>NaN</td>\n",
1154
+ " <td>NaN</td>\n",
1155
+ " <td>NaN</td>\n",
1156
+ " <td>NaN</td>\n",
1157
+ " <td>NaN</td>\n",
1158
+ " <td>NaN</td>\n",
1159
  " <td>80480.449461</td>\n",
1160
  " </tr>\n",
1161
  " <tr>\n",
 
1171
  " <td>NaN</td>\n",
1172
  " <td>NaN</td>\n",
1173
  " <td>NaN</td>\n",
1174
+ " <td>NaN</td>\n",
1175
+ " <td>NaN</td>\n",
1176
+ " <td>NaN</td>\n",
1177
+ " <td>NaN</td>\n",
1178
+ " <td>NaN</td>\n",
1179
+ " <td>NaN</td>\n",
1180
  " <td>79799.206525</td>\n",
1181
  " </tr>\n",
1182
  " <tr>\n",
 
1192
  " <td>NaN</td>\n",
1193
  " <td>NaN</td>\n",
1194
  " <td>NaN</td>\n",
1195
+ " <td>NaN</td>\n",
1196
+ " <td>NaN</td>\n",
1197
+ " <td>NaN</td>\n",
1198
+ " <td>NaN</td>\n",
1199
+ " <td>NaN</td>\n",
1200
+ " <td>NaN</td>\n",
1201
  " <td>79666.469861</td>\n",
1202
  " </tr>\n",
1203
  " <tr>\n",
 
1215
  " <td>...</td>\n",
1216
  " <td>...</td>\n",
1217
  " <td>...</td>\n",
1218
+ " <td>...</td>\n",
1219
+ " <td>...</td>\n",
1220
+ " <td>...</td>\n",
1221
+ " <td>...</td>\n",
1222
+ " <td>...</td>\n",
1223
  " </tr>\n",
1224
  " <tr>\n",
1225
  " <th>117907</th>\n",
 
1234
  " <td>NaN</td>\n",
1235
  " <td>NaN</td>\n",
1236
  " <td>NaN</td>\n",
1237
+ " <td>NaN</td>\n",
1238
+ " <td>NaN</td>\n",
1239
  " <td>486974.735908</td>\n",
1240
+ " <td>NaN</td>\n",
1241
+ " <td>NaN</td>\n",
1242
+ " <td>NaN</td>\n",
1243
+ " <td>NaN</td>\n",
1244
  " </tr>\n",
1245
  " <tr>\n",
1246
  " <th>117908</th>\n",
 
1255
  " <td>NaN</td>\n",
1256
  " <td>NaN</td>\n",
1257
  " <td>NaN</td>\n",
1258
+ " <td>NaN</td>\n",
1259
+ " <td>NaN</td>\n",
1260
  " <td>485847.539614</td>\n",
1261
+ " <td>NaN</td>\n",
1262
+ " <td>NaN</td>\n",
1263
+ " <td>NaN</td>\n",
1264
+ " <td>NaN</td>\n",
1265
  " </tr>\n",
1266
  " <tr>\n",
1267
  " <th>117909</th>\n",
 
1276
  " <td>NaN</td>\n",
1277
  " <td>NaN</td>\n",
1278
  " <td>NaN</td>\n",
1279
+ " <td>NaN</td>\n",
1280
+ " <td>NaN</td>\n",
1281
  " <td>484223.885775</td>\n",
1282
+ " <td>NaN</td>\n",
1283
+ " <td>NaN</td>\n",
1284
+ " <td>NaN</td>\n",
1285
+ " <td>NaN</td>\n",
1286
  " </tr>\n",
1287
  " <tr>\n",
1288
  " <th>117910</th>\n",
 
1297
  " <td>NaN</td>\n",
1298
  " <td>NaN</td>\n",
1299
  " <td>NaN</td>\n",
1300
+ " <td>NaN</td>\n",
1301
+ " <td>NaN</td>\n",
1302
  " <td>481522.403338</td>\n",
1303
+ " <td>NaN</td>\n",
1304
+ " <td>NaN</td>\n",
1305
+ " <td>NaN</td>\n",
1306
+ " <td>NaN</td>\n",
1307
  " </tr>\n",
1308
  " <tr>\n",
1309
  " <th>117911</th>\n",
 
1318
  " <td>NaN</td>\n",
1319
  " <td>NaN</td>\n",
1320
  " <td>NaN</td>\n",
1321
+ " <td>NaN</td>\n",
1322
+ " <td>NaN</td>\n",
1323
  " <td>481181.718200</td>\n",
1324
+ " <td>NaN</td>\n",
1325
+ " <td>NaN</td>\n",
1326
+ " <td>NaN</td>\n",
1327
+ " <td>NaN</td>\n",
1328
  " </tr>\n",
1329
  " </tbody>\n",
1330
  "</table>\n",
1331
+ "<p>117912 rows × 18 columns</p>\n",
1332
  "</div>"
1333
  ],
1334
  "text/plain": [
 
1371
  "117910 NaN \n",
1372
  "117911 NaN \n",
1373
  "\n",
1374
+ " Mid Tier ZHVI (Smoothed) (Seasonally Adjusted)_1 \\\n",
1375
+ "0 NaN \n",
1376
+ "1 NaN \n",
1377
+ "2 NaN \n",
1378
+ "3 NaN \n",
1379
+ "4 NaN \n",
1380
+ "... ... \n",
1381
+ "117907 NaN \n",
1382
+ "117908 NaN \n",
1383
+ "117909 NaN \n",
1384
+ "117910 NaN \n",
1385
+ "117911 NaN \n",
1386
+ "\n",
1387
+ " Mid Tier ZHVI (Smoothed) (Seasonally Adjusted)_2 \\\n",
1388
+ "0 NaN \n",
1389
+ "1 NaN \n",
1390
+ "2 NaN \n",
1391
+ "3 NaN \n",
1392
+ "4 NaN \n",
1393
+ "... ... \n",
1394
+ "117907 NaN \n",
1395
+ "117908 NaN \n",
1396
+ "117909 NaN \n",
1397
+ "117910 NaN \n",
1398
+ "117911 NaN \n",
1399
+ "\n",
1400
  " Bottom Tier ZHVI (Smoothed) (Seasonally Adjusted) \\\n",
1401
  "0 NaN \n",
1402
  "1 NaN \n",
 
1410
  "117910 NaN \n",
1411
  "117911 NaN \n",
1412
  "\n",
1413
+ " Mid Tier ZHVI (Smoothed) (Seasonally Adjusted)_4 \\\n",
1414
+ "0 NaN \n",
1415
+ "1 NaN \n",
1416
+ "2 NaN \n",
1417
+ "3 NaN \n",
1418
+ "4 NaN \n",
1419
+ "... ... \n",
1420
+ "117907 NaN \n",
1421
+ "117908 NaN \n",
1422
+ "117909 NaN \n",
1423
+ "117910 NaN \n",
1424
+ "117911 NaN \n",
1425
  "\n",
1426
+ " Mid Tier ZHVI (Smoothed) (Seasonally Adjusted)_5 \\\n",
1427
+ "0 NaN \n",
1428
+ "1 NaN \n",
1429
+ "2 NaN \n",
1430
+ "3 NaN \n",
1431
+ "4 NaN \n",
1432
+ "... ... \n",
1433
+ "117907 486974.735908 \n",
1434
+ "117908 485847.539614 \n",
1435
+ "117909 484223.885775 \n",
1436
+ "117910 481522.403338 \n",
1437
+ "117911 481181.718200 \n",
1438
  "\n",
1439
+ " Mid Tier ZHVI (Smoothed) (Seasonally Adjusted)_6 \\\n",
1440
+ "0 NaN \n",
1441
+ "1 NaN \n",
1442
+ "2 NaN \n",
1443
+ "3 NaN \n",
1444
+ "4 NaN \n",
1445
+ "... ... \n",
1446
+ "117907 NaN \n",
1447
+ "117908 NaN \n",
1448
+ "117909 NaN \n",
1449
+ "117910 NaN \n",
1450
+ "117911 NaN \n",
1451
+ "\n",
1452
+ " Top Tier ZHVI (Smoothed) (Seasonally Adjusted) \\\n",
1453
+ "0 NaN \n",
1454
+ "1 NaN \n",
1455
+ "2 NaN \n",
1456
+ "3 NaN \n",
1457
+ "4 NaN \n",
1458
+ "... ... \n",
1459
+ "117907 NaN \n",
1460
+ "117908 NaN \n",
1461
+ "117909 NaN \n",
1462
+ "117910 NaN \n",
1463
+ "117911 NaN \n",
1464
+ "\n",
1465
+ " Mid Tier ZHVI (Smoothed) (Seasonally Adjusted)_8 \\\n",
1466
+ "0 NaN \n",
1467
+ "1 NaN \n",
1468
+ "2 NaN \n",
1469
+ "3 NaN \n",
1470
+ "4 NaN \n",
1471
+ "... ... \n",
1472
+ "117907 NaN \n",
1473
+ "117908 NaN \n",
1474
+ "117909 NaN \n",
1475
+ "117910 NaN \n",
1476
+ "117911 NaN \n",
1477
+ "\n",
1478
+ " Mid Tier ZHVI (Smoothed) (Seasonally Adjusted)_9 \n",
1479
+ "0 81310.639504 \n",
1480
+ "1 80419.761984 \n",
1481
+ "2 80480.449461 \n",
1482
+ "3 79799.206525 \n",
1483
+ "4 79666.469861 \n",
1484
+ "... ... \n",
1485
+ "117907 NaN \n",
1486
+ "117908 NaN \n",
1487
+ "117909 NaN \n",
1488
+ "117910 NaN \n",
1489
+ "117911 NaN \n",
1490
+ "\n",
1491
+ "[117912 rows x 18 columns]"
1492
  ]
1493
  },
1494
+ "execution_count": 11,
1495
  "metadata": {},
1496
  "output_type": "execute_result"
1497
  }
 
1522
  },
1523
  {
1524
  "cell_type": "code",
1525
+ "execution_count": 12,
1526
  "metadata": {},
1527
  "outputs": [
1528
  {
 
1555
  " <th>Home Type</th>\n",
1556
  " <th>Date</th>\n",
1557
  " <th>Mid Tier ZHVI (Smoothed) (Seasonally Adjusted)</th>\n",
1558
+ " <th>Mid Tier ZHVI (Smoothed) (Seasonally Adjusted)_1</th>\n",
1559
+ " <th>Mid Tier ZHVI (Smoothed) (Seasonally Adjusted)_2</th>\n",
1560
  " <th>Bottom Tier ZHVI (Smoothed) (Seasonally Adjusted)</th>\n",
1561
+ " <th>Mid Tier ZHVI (Smoothed) (Seasonally Adjusted)_4</th>\n",
1562
+ " <th>Mid Tier ZHVI (Smoothed) (Seasonally Adjusted)_5</th>\n",
1563
+ " <th>Mid Tier ZHVI (Smoothed) (Seasonally Adjusted)_6</th>\n",
1564
  " <th>Top Tier ZHVI (Smoothed) (Seasonally Adjusted)</th>\n",
1565
+ " <th>Mid Tier ZHVI (Smoothed) (Seasonally Adjusted)_8</th>\n",
1566
+ " <th>Mid Tier ZHVI (Smoothed) (Seasonally Adjusted)_9</th>\n",
1567
  " </tr>\n",
1568
  " </thead>\n",
1569
  " <tbody>\n",
 
1580
  " <td>NaN</td>\n",
1581
  " <td>NaN</td>\n",
1582
  " <td>NaN</td>\n",
1583
+ " <td>NaN</td>\n",
1584
+ " <td>NaN</td>\n",
1585
+ " <td>NaN</td>\n",
1586
+ " <td>NaN</td>\n",
1587
+ " <td>NaN</td>\n",
1588
+ " <td>NaN</td>\n",
1589
  " <td>81310.639504</td>\n",
1590
  " </tr>\n",
1591
  " <tr>\n",
 
1601
  " <td>NaN</td>\n",
1602
  " <td>NaN</td>\n",
1603
  " <td>NaN</td>\n",
1604
+ " <td>NaN</td>\n",
1605
+ " <td>NaN</td>\n",
1606
+ " <td>NaN</td>\n",
1607
+ " <td>NaN</td>\n",
1608
+ " <td>NaN</td>\n",
1609
+ " <td>NaN</td>\n",
1610
  " <td>80419.761984</td>\n",
1611
  " </tr>\n",
1612
  " <tr>\n",
 
1622
  " <td>NaN</td>\n",
1623
  " <td>NaN</td>\n",
1624
  " <td>NaN</td>\n",
1625
+ " <td>NaN</td>\n",
1626
+ " <td>NaN</td>\n",
1627
+ " <td>NaN</td>\n",
1628
+ " <td>NaN</td>\n",
1629
+ " <td>NaN</td>\n",
1630
+ " <td>NaN</td>\n",
1631
  " <td>80480.449461</td>\n",
1632
  " </tr>\n",
1633
  " <tr>\n",
 
1643
  " <td>NaN</td>\n",
1644
  " <td>NaN</td>\n",
1645
  " <td>NaN</td>\n",
1646
+ " <td>NaN</td>\n",
1647
+ " <td>NaN</td>\n",
1648
+ " <td>NaN</td>\n",
1649
+ " <td>NaN</td>\n",
1650
+ " <td>NaN</td>\n",
1651
+ " <td>NaN</td>\n",
1652
  " <td>79799.206525</td>\n",
1653
  " </tr>\n",
1654
  " <tr>\n",
 
1664
  " <td>NaN</td>\n",
1665
  " <td>NaN</td>\n",
1666
  " <td>NaN</td>\n",
1667
+ " <td>NaN</td>\n",
1668
+ " <td>NaN</td>\n",
1669
+ " <td>NaN</td>\n",
1670
+ " <td>NaN</td>\n",
1671
+ " <td>NaN</td>\n",
1672
+ " <td>NaN</td>\n",
1673
  " <td>79666.469861</td>\n",
1674
  " </tr>\n",
1675
  " <tr>\n",
 
1687
  " <td>...</td>\n",
1688
  " <td>...</td>\n",
1689
  " <td>...</td>\n",
1690
+ " <td>...</td>\n",
1691
+ " <td>...</td>\n",
1692
+ " <td>...</td>\n",
1693
+ " <td>...</td>\n",
1694
+ " <td>...</td>\n",
1695
  " </tr>\n",
1696
  " <tr>\n",
1697
  " <th>117907</th>\n",
 
1706
  " <td>NaN</td>\n",
1707
  " <td>NaN</td>\n",
1708
  " <td>NaN</td>\n",
1709
+ " <td>NaN</td>\n",
1710
+ " <td>NaN</td>\n",
1711
  " <td>486974.735908</td>\n",
1712
+ " <td>NaN</td>\n",
1713
+ " <td>NaN</td>\n",
1714
+ " <td>NaN</td>\n",
1715
+ " <td>NaN</td>\n",
1716
  " </tr>\n",
1717
  " <tr>\n",
1718
  " <th>117908</th>\n",
 
1727
  " <td>NaN</td>\n",
1728
  " <td>NaN</td>\n",
1729
  " <td>NaN</td>\n",
1730
+ " <td>NaN</td>\n",
1731
+ " <td>NaN</td>\n",
1732
  " <td>485847.539614</td>\n",
1733
+ " <td>NaN</td>\n",
1734
+ " <td>NaN</td>\n",
1735
+ " <td>NaN</td>\n",
1736
+ " <td>NaN</td>\n",
1737
  " </tr>\n",
1738
  " <tr>\n",
1739
  " <th>117909</th>\n",
 
1748
  " <td>NaN</td>\n",
1749
  " <td>NaN</td>\n",
1750
  " <td>NaN</td>\n",
1751
+ " <td>NaN</td>\n",
1752
+ " <td>NaN</td>\n",
1753
  " <td>484223.885775</td>\n",
1754
+ " <td>NaN</td>\n",
1755
+ " <td>NaN</td>\n",
1756
+ " <td>NaN</td>\n",
1757
+ " <td>NaN</td>\n",
1758
  " </tr>\n",
1759
  " <tr>\n",
1760
  " <th>117910</th>\n",
 
1769
  " <td>NaN</td>\n",
1770
  " <td>NaN</td>\n",
1771
  " <td>NaN</td>\n",
1772
+ " <td>NaN</td>\n",
1773
+ " <td>NaN</td>\n",
1774
  " <td>481522.403338</td>\n",
1775
+ " <td>NaN</td>\n",
1776
+ " <td>NaN</td>\n",
1777
+ " <td>NaN</td>\n",
1778
+ " <td>NaN</td>\n",
1779
  " </tr>\n",
1780
  " <tr>\n",
1781
  " <th>117911</th>\n",
 
1790
  " <td>NaN</td>\n",
1791
  " <td>NaN</td>\n",
1792
  " <td>NaN</td>\n",
1793
+ " <td>NaN</td>\n",
1794
+ " <td>NaN</td>\n",
1795
  " <td>481181.718200</td>\n",
1796
+ " <td>NaN</td>\n",
1797
+ " <td>NaN</td>\n",
1798
+ " <td>NaN</td>\n",
1799
+ " <td>NaN</td>\n",
1800
  " </tr>\n",
1801
  " </tbody>\n",
1802
  "</table>\n",
1803
+ "<p>117912 rows × 18 columns</p>\n",
1804
  "</div>"
1805
  ],
1806
  "text/plain": [
 
1843
  "117910 NaN \n",
1844
  "117911 NaN \n",
1845
  "\n",
1846
+ " Mid Tier ZHVI (Smoothed) (Seasonally Adjusted)_1 \\\n",
1847
+ "0 NaN \n",
1848
+ "1 NaN \n",
1849
+ "2 NaN \n",
1850
+ "3 NaN \n",
1851
+ "4 NaN \n",
1852
+ "... ... \n",
1853
+ "117907 NaN \n",
1854
+ "117908 NaN \n",
1855
+ "117909 NaN \n",
1856
+ "117910 NaN \n",
1857
+ "117911 NaN \n",
1858
+ "\n",
1859
+ " Mid Tier ZHVI (Smoothed) (Seasonally Adjusted)_2 \\\n",
1860
+ "0 NaN \n",
1861
+ "1 NaN \n",
1862
+ "2 NaN \n",
1863
+ "3 NaN \n",
1864
+ "4 NaN \n",
1865
+ "... ... \n",
1866
+ "117907 NaN \n",
1867
+ "117908 NaN \n",
1868
+ "117909 NaN \n",
1869
+ "117910 NaN \n",
1870
+ "117911 NaN \n",
1871
+ "\n",
1872
  " Bottom Tier ZHVI (Smoothed) (Seasonally Adjusted) \\\n",
1873
  "0 NaN \n",
1874
  "1 NaN \n",
 
1882
  "117910 NaN \n",
1883
  "117911 NaN \n",
1884
  "\n",
1885
+ " Mid Tier ZHVI (Smoothed) (Seasonally Adjusted)_4 \\\n",
1886
+ "0 NaN \n",
1887
+ "1 NaN \n",
1888
+ "2 NaN \n",
1889
+ "3 NaN \n",
1890
+ "4 NaN \n",
1891
+ "... ... \n",
1892
+ "117907 NaN \n",
1893
+ "117908 NaN \n",
1894
+ "117909 NaN \n",
1895
+ "117910 NaN \n",
1896
+ "117911 NaN \n",
1897
  "\n",
1898
+ " Mid Tier ZHVI (Smoothed) (Seasonally Adjusted)_5 \\\n",
1899
+ "0 NaN \n",
1900
+ "1 NaN \n",
1901
+ "2 NaN \n",
1902
+ "3 NaN \n",
1903
+ "4 NaN \n",
1904
+ "... ... \n",
1905
+ "117907 486974.735908 \n",
1906
+ "117908 485847.539614 \n",
1907
+ "117909 484223.885775 \n",
1908
+ "117910 481522.403338 \n",
1909
+ "117911 481181.718200 \n",
1910
  "\n",
1911
+ " Mid Tier ZHVI (Smoothed) (Seasonally Adjusted)_6 \\\n",
1912
+ "0 NaN \n",
1913
+ "1 NaN \n",
1914
+ "2 NaN \n",
1915
+ "3 NaN \n",
1916
+ "4 NaN \n",
1917
+ "... ... \n",
1918
+ "117907 NaN \n",
1919
+ "117908 NaN \n",
1920
+ "117909 NaN \n",
1921
+ "117910 NaN \n",
1922
+ "117911 NaN \n",
1923
+ "\n",
1924
+ " Top Tier ZHVI (Smoothed) (Seasonally Adjusted) \\\n",
1925
+ "0 NaN \n",
1926
+ "1 NaN \n",
1927
+ "2 NaN \n",
1928
+ "3 NaN \n",
1929
+ "4 NaN \n",
1930
+ "... ... \n",
1931
+ "117907 NaN \n",
1932
+ "117908 NaN \n",
1933
+ "117909 NaN \n",
1934
+ "117910 NaN \n",
1935
+ "117911 NaN \n",
1936
+ "\n",
1937
+ " Mid Tier ZHVI (Smoothed) (Seasonally Adjusted)_8 \\\n",
1938
+ "0 NaN \n",
1939
+ "1 NaN \n",
1940
+ "2 NaN \n",
1941
+ "3 NaN \n",
1942
+ "4 NaN \n",
1943
+ "... ... \n",
1944
+ "117907 NaN \n",
1945
+ "117908 NaN \n",
1946
+ "117909 NaN \n",
1947
+ "117910 NaN \n",
1948
+ "117911 NaN \n",
1949
+ "\n",
1950
+ " Mid Tier ZHVI (Smoothed) (Seasonally Adjusted)_9 \n",
1951
+ "0 81310.639504 \n",
1952
+ "1 80419.761984 \n",
1953
+ "2 80480.449461 \n",
1954
+ "3 79799.206525 \n",
1955
+ "4 79666.469861 \n",
1956
+ "... ... \n",
1957
+ "117907 NaN \n",
1958
+ "117908 NaN \n",
1959
+ "117909 NaN \n",
1960
+ "117910 NaN \n",
1961
+ "117911 NaN \n",
1962
+ "\n",
1963
+ "[117912 rows x 18 columns]"
1964
  ]
1965
  },
1966
+ "execution_count": 12,
1967
  "metadata": {},
1968
  "output_type": "execute_result"
1969
  }
 
1986
  },
1987
  {
1988
  "cell_type": "code",
1989
+ "execution_count": 13,
1990
  "metadata": {},
1991
  "outputs": [],
1992
  "source": [
1993
+ "save_final_df_as_jsonl(FULL_PROCESSED_DIR_PATH, final_df)"
1994
  ]
1995
  }
1996
  ],
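
Note: the shared `helpers` module these processors now import is not included in this diff. Reconstructed from the inline code removed in the other processors below, `get_combined_df` presumably takes the list of melted frames plus the merge keys (previously hardcoded) and outer-merges them pairwise; a minimal sketch, assuming that signature:

```python
# helpers.py (sketch) -- reconstructed from the inline get_combined_df
# removed by this commit; not the actual module, which is not shown here.
import pandas as pd


def get_combined_df(data_frames, on_columns):
    # Outer-merge the frames one by one; colliding value columns get an
    # "_<i>" suffix so they can be coalesced back together afterwards.
    combined_df = None
    if len(data_frames) > 1:
        combined_df = data_frames[0]
        for i in range(1, len(data_frames)):
            combined_df = pd.merge(
                combined_df,
                data_frames[i],
                on=on_columns,
                how="outer",
                suffixes=("", "_" + str(i)),
            )
    elif len(data_frames) == 1:
        combined_df = data_frames[0]
    return combined_df
```

One thing worth flagging in the days_on_market notebook above: its execution counts run out of order (8, 9, 10, 4, 11, 12), and the combined and final outputs now show 18 columns with `Mid Tier ZHVI (Smoothed) (Seasonally Adjusted)_1` through `_9` suffixes, which suggests the coalescing cell was not re-run before the final output was produced.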
processors/new_construction.ipynb CHANGED
@@ -2,17 +2,19 @@
2
  "cells": [
3
  {
4
  "cell_type": "code",
5
- "execution_count": 2,
6
  "metadata": {},
7
  "outputs": [],
8
  "source": [
9
  "import pandas as pd\n",
10
- "import os"
11
  ]
12
  },
13
  {
14
  "cell_type": "code",
15
- "execution_count": 3,
16
  "metadata": {},
17
  "outputs": [],
18
  "source": [
@@ -25,7 +27,7 @@
25
  },
26
  {
27
  "cell_type": "code",
28
- "execution_count": 38,
29
  "metadata": {},
30
  "outputs": [
31
  {
@@ -255,7 +257,7 @@
255
  "[49487 rows x 10 columns]"
256
  ]
257
  },
258
- "execution_count": 38,
259
  "metadata": {},
260
  "output_type": "execute_result"
261
  }
@@ -320,54 +322,29 @@
320
  " data_frames.append(cur_df)\n",
321
  "\n",
322
  "\n",
323
- "def get_combined_df(data_frames):\n",
324
- " combined_df = None\n",
325
- " if len(data_frames) > 1:\n",
326
- " # iterate over dataframes and merge or concat\n",
327
- " combined_df = data_frames[0]\n",
328
- " for i in range(1, len(data_frames)):\n",
329
- " cur_df = data_frames[i]\n",
330
- " combined_df = pd.merge(\n",
331
- " combined_df,\n",
332
- " cur_df,\n",
333
- " on=[\n",
334
- " \"RegionID\",\n",
335
- " \"SizeRank\",\n",
336
- " \"RegionName\",\n",
337
- " \"RegionType\",\n",
338
- " \"StateName\",\n",
339
- " \"Home Type\",\n",
340
- " \"Date\",\n",
341
- " ],\n",
342
- " how=\"outer\",\n",
343
- " suffixes=(\"\", \"_\" + str(i)),\n",
344
- " )\n",
345
- " elif len(data_frames) == 1:\n",
346
- " combined_df = data_frames[0]\n",
347
- "\n",
348
- " return combined_df\n",
349
- "\n",
350
- "\n",
351
- "combined_df = get_combined_df(data_frames)\n",
352
  "# iterate over rows of combined df and coalesce column values across columns that start with \"Median Sale Price\"\n",
353
  "columns_to_coalesce = [\"Sales Count\", \"Median Sale Price\", \"Median Sale Price per Sqft\"]\n",
354
  "\n",
355
- "for index, row in combined_df.iterrows():\n",
356
- " for col in combined_df.columns:\n",
357
- " for column_to_coalesce in columns_to_coalesce:\n",
358
- " if column_to_coalesce in col and \"_\" in col:\n",
359
- " if not pd.isna(row[col]):\n",
360
- " combined_df.at[index, column_to_coalesce] = row[col]\n",
361
- "\n",
362
- "# remove columns with underscores\n",
363
- "combined_df = combined_df[[col for col in combined_df.columns if \"_\" not in col]]\n",
364
  "\n",
365
  "combined_df"
366
  ]
367
  },
368
  {
369
  "cell_type": "code",
370
- "execution_count": 39,
371
  "metadata": {},
372
  "outputs": [
373
  {
@@ -582,7 +559,7 @@
582
  "[49487 rows x 10 columns]"
583
  ]
584
  },
585
- "execution_count": 39,
586
  "metadata": {},
587
  "output_type": "execute_result"
588
  }
@@ -604,14 +581,11 @@
604
  },
605
  {
606
  "cell_type": "code",
607
- "execution_count": 40,
608
  "metadata": {},
609
  "outputs": [],
610
  "source": [
611
- "if not os.path.exists(FULL_PROCESSED_DIR_PATH):\n",
612
- " os.makedirs(FULL_PROCESSED_DIR_PATH)\n",
613
- "\n",
614
- "final_df.to_json(FULL_PROCESSED_DIR_PATH + \"final.jsonl\", orient=\"records\", lines=True)"
615
  ]
616
  }
617
  ],
 
2
  "cells": [
3
  {
4
  "cell_type": "code",
5
+ "execution_count": 1,
6
  "metadata": {},
7
  "outputs": [],
8
  "source": [
9
  "import pandas as pd\n",
10
+ "import os\n",
11
+ "\n",
12
+ "from helpers import get_combined_df, coalesce_columns, get_df, save_final_df_as_jsonl"
13
  ]
14
  },
15
  {
16
  "cell_type": "code",
17
+ "execution_count": 2,
18
  "metadata": {},
19
  "outputs": [],
20
  "source": [
 
27
  },
28
  {
29
  "cell_type": "code",
30
+ "execution_count": 3,
31
  "metadata": {},
32
  "outputs": [
33
  {
 
257
  "[49487 rows x 10 columns]"
258
  ]
259
  },
260
+ "execution_count": 3,
261
  "metadata": {},
262
  "output_type": "execute_result"
263
  }
 
322
  " data_frames.append(cur_df)\n",
323
  "\n",
324
  "\n",
325
+ "combined_df = get_combined_df(\n",
326
+ " data_frames,\n",
327
+ " [\n",
328
+ " \"RegionID\",\n",
329
+ " \"SizeRank\",\n",
330
+ " \"RegionName\",\n",
331
+ " \"RegionType\",\n",
332
+ " \"StateName\",\n",
333
+ " \"Home Type\",\n",
334
+ " \"Date\",\n",
335
+ " ],\n",
336
+ ")\n",
337
  "# iterate over rows of combined df and coalesce column values across columns that start with \"Median Sale Price\"\n",
338
  "columns_to_coalesce = [\"Sales Count\", \"Median Sale Price\", \"Median Sale Price per Sqft\"]\n",
339
  "\n",
340
+ "combined_df = coalesce_columns(combined_df, columns_to_coalesce)\n",
341
  "\n",
342
  "combined_df"
343
  ]
344
  },
345
  {
346
  "cell_type": "code",
347
+ "execution_count": 4,
348
  "metadata": {},
349
  "outputs": [
350
  {
 
559
  "[49487 rows x 10 columns]"
560
  ]
561
  },
562
+ "execution_count": 4,
563
  "metadata": {},
564
  "output_type": "execute_result"
565
  }
 
581
  },
582
  {
583
  "cell_type": "code",
584
+ "execution_count": 5,
585
  "metadata": {},
586
  "outputs": [],
587
  "source": [
588
+ "save_final_df_as_jsonl(FULL_PROCESSED_DIR_PATH, final_df)"
589
  ]
590
  }
591
  ],
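
The manual coalescing loops deleted above are presumably what became `helpers.coalesce_columns`; a sketch, assuming it simply parameterizes the removed inline code:

```python
import pandas as pd


def coalesce_columns(combined_df, columns_to_coalesce):
    # Fold values from suffixed duplicates (e.g. "Sales Count_2") back into
    # their base column, then drop every column containing an underscore.
    for index, row in combined_df.iterrows():
        for col in combined_df.columns:
            for column_to_coalesce in columns_to_coalesce:
                if column_to_coalesce in col and "_" in col:
                    if not pd.isna(row[col]):
                        combined_df.at[index, column_to_coalesce] = row[col]
    return combined_df[[col for col in combined_df.columns if "_" not in col]]
```

As in the inline version, the substring tests rely on base column names ("Sales Count", "Median Sale Price", ...) never containing an underscore themselves; only the merge-suffixed duplicates do.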
processors/rentals.ipynb CHANGED
@@ -2,17 +2,19 @@
2
  "cells": [
3
  {
4
  "cell_type": "code",
5
- "execution_count": 2,
6
  "metadata": {},
7
  "outputs": [],
8
  "source": [
9
  "import pandas as pd\n",
10
- "import os"
11
  ]
12
  },
13
  {
14
  "cell_type": "code",
15
- "execution_count": 3,
16
  "metadata": {},
17
  "outputs": [],
18
  "source": [
@@ -25,7 +27,7 @@
25
  },
26
  {
27
  "cell_type": "code",
28
- "execution_count": 26,
29
  "metadata": {},
30
  "outputs": [
31
  {
@@ -326,7 +328,7 @@
326
  "[1258740 rows x 15 columns]"
327
  ]
328
  },
329
- "execution_count": 26,
330
  "metadata": {},
331
  "output_type": "execute_result"
332
  }
@@ -334,7 +336,6 @@
334
  "source": [
335
  "# base cols RegionID,SizeRank,RegionName,RegionType,StateName\n",
336
  "\n",
337
- "\n",
338
  "data_frames = []\n",
339
  "\n",
340
  "for filename in os.listdir(FULL_DATA_DIR_PATH):\n",
@@ -404,103 +405,42 @@
404
  " # Identify columns to pivot\n",
405
  " columns_to_pivot = [col for col in cur_df.columns if col not in exclude_columns]\n",
406
  "\n",
407
- " smoothed = \"_sm_\" in filename\n",
408
- " seasonally_adjusted = \"_sa_\" in filename\n",
409
- "\n",
410
- " col_name = \"Rent\"\n",
411
- " if smoothed:\n",
412
- " col_name += \" (Smoothed)\"\n",
413
- " if seasonally_adjusted:\n",
414
- " col_name += \" (Seasonally Adjusted)\"\n",
415
- " cur_df = pd.melt(\n",
416
  " cur_df,\n",
417
- " id_vars=exclude_columns,\n",
418
- " value_vars=columns_to_pivot,\n",
419
- " var_name=\"Date\",\n",
420
- " value_name=col_name,\n",
421
  " )\n",
422
- " data_frames.append(cur_df)\n",
423
- " # print(filename)\n",
424
- "\n",
425
- "\n",
426
- "def get_combined_df(data_frames):\n",
427
- " combined_df = None\n",
428
- " if len(data_frames) > 1:\n",
429
- " # iterate over dataframes and merge or concat\n",
430
- " combined_df = data_frames[0]\n",
431
- " for i in range(1, len(data_frames)):\n",
432
- " cur_df = data_frames[i]\n",
433
- " combined_df = pd.merge(\n",
434
- " combined_df,\n",
435
- " cur_df,\n",
436
- " on=[\n",
437
- " \"RegionID\",\n",
438
- " \"SizeRank\",\n",
439
- " \"RegionName\",\n",
440
- " \"RegionType\",\n",
441
- " \"StateName\",\n",
442
- " \"Home Type\",\n",
443
- " \"Date\",\n",
444
- " ],\n",
445
- " how=\"outer\",\n",
446
- " suffixes=(\"\", \"_\" + str(i)),\n",
447
- " )\n",
448
- " elif len(data_frames) == 1:\n",
449
- " combined_df = data_frames[0]\n",
450
  "\n",
451
- " return combined_df\n",
452
  "\n",
453
  "\n",
454
- "combined_df = get_combined_df(data_frames)\n",
455
  "\n",
456
  "\n",
457
  "# iterate over rows of combined df and coalesce column values across columns that start with \"Median Sale Price\"\n",
458
  "columns_to_coalesce = [\"Rent (Smoothed)\", \"Rent (Smoothed) (Seasonally Adjusted)\"]\n",
459
  "\n",
460
- "for index, row in combined_df.iterrows():\n",
461
- " for col in combined_df.columns:\n",
462
- " for column_to_coalesce in columns_to_coalesce:\n",
463
- " if column_to_coalesce in col and \"_\" in col:\n",
464
- " if not pd.isna(row[col]):\n",
465
- " combined_df.at[index, column_to_coalesce] = row[col]\n",
466
- "\n",
467
- "# remove columns with underscores\n",
468
- "combined_df = combined_df[[col for col in combined_df.columns if \"_\" not in col]]\n",
469
- "\n",
470
  "\n",
471
  "combined_df"
472
  ]
473
  },
474
  {
475
  "cell_type": "code",
476
- "execution_count": 27,
477
- "metadata": {},
478
- "outputs": [
479
- {
480
- "data": {
481
- "text/plain": [
482
- "Index(['RegionID', 'SizeRank', 'RegionName', 'RegionType', 'StateName',\n",
483
- " 'Home Type', 'State', 'Metro', 'StateCodeFIPS', 'MunicipalCodeFIPS',\n",
484
- " 'Date', 'Rent (Smoothed)', 'CountyName',\n",
485
- " 'Rent (Smoothed) (Seasonally Adjusted)', 'City'],\n",
486
- " dtype='object')"
487
- ]
488
- },
489
- "execution_count": 27,
490
- "metadata": {},
491
- "output_type": "execute_result"
492
- }
493
- ],
494
- "source": [
495
- "combined_df.columns\n",
496
- "# combined_df[\"RegionType\"].unique()\n",
497
- "\n",
498
- "# combined_df"
499
- ]
500
- },
501
- {
502
- "cell_type": "code",
503
- "execution_count": 32,
504
  "metadata": {},
505
  "outputs": [
506
  {
@@ -789,7 +729,7 @@
789
  "[1258740 rows x 14 columns]"
790
  ]
791
  },
792
- "execution_count": 32,
793
  "metadata": {},
794
  "output_type": "execute_result"
795
  }
@@ -813,7 +753,7 @@
813
  },
814
  {
815
  "cell_type": "code",
816
- "execution_count": 33,
817
  "metadata": {},
818
  "outputs": [
819
  {
@@ -1102,12 +1042,13 @@
1102
  "[1258740 rows x 14 columns]"
1103
  ]
1104
  },
1105
- "execution_count": 33,
1106
  "metadata": {},
1107
  "output_type": "execute_result"
1108
  }
1109
  ],
1110
  "source": [
 
1111
  "final_df = final_df.rename(\n",
1112
  " columns={\n",
1113
  " \"RegionID\": \"Region ID\",\n",
@@ -1124,14 +1065,11 @@
1124
  },
1125
  {
1126
  "cell_type": "code",
1127
- "execution_count": 35,
1128
  "metadata": {},
1129
  "outputs": [],
1130
  "source": [
1131
- "if not os.path.exists(FULL_PROCESSED_DIR_PATH):\n",
1132
- " os.makedirs(FULL_PROCESSED_DIR_PATH)\n",
1133
- "\n",
1134
- "final_df.to_json(FULL_PROCESSED_DIR_PATH + \"final.jsonl\", orient=\"records\", lines=True)"
1135
  ]
1136
  }
1137
  ],
 
2
  "cells": [
3
  {
4
  "cell_type": "code",
5
+ "execution_count": 1,
6
  "metadata": {},
7
  "outputs": [],
8
  "source": [
9
  "import pandas as pd\n",
10
+ "import os\n",
11
+ "\n",
12
+ "from helpers import get_combined_df, coalesce_columns, get_df, save_final_df_as_jsonl"
13
  ]
14
  },
15
  {
16
  "cell_type": "code",
17
+ "execution_count": 2,
18
  "metadata": {},
19
  "outputs": [],
20
  "source": [
 
27
  },
28
  {
29
  "cell_type": "code",
30
+ "execution_count": 4,
31
  "metadata": {},
32
  "outputs": [
33
  {
 
328
  "[1258740 rows x 15 columns]"
329
  ]
330
  },
331
+ "execution_count": 4,
332
  "metadata": {},
333
  "output_type": "execute_result"
334
  }
 
336
  "source": [
337
  "# base cols RegionID,SizeRank,RegionName,RegionType,StateName\n",
338
  "\n",
 
339
  "data_frames = []\n",
340
  "\n",
341
  "for filename in os.listdir(FULL_DATA_DIR_PATH):\n",
 
405
  " # Identify columns to pivot\n",
406
  " columns_to_pivot = [col for col in cur_df.columns if col not in exclude_columns]\n",
407
  "\n",
408
+ " cur_df = get_df(\n",
 
409
  " cur_df,\n",
410
+ " exclude_columns,\n",
411
+ " columns_to_pivot,\n",
412
+ " \"Rent\",\n",
413
+ " filename,\n",
414
  " )\n",
 
415
  "\n",
416
+ " data_frames.append(cur_df)\n",
417
  "\n",
418
  "\n",
419
+ "combined_df = get_combined_df(\n",
420
+ " data_frames,\n",
421
+ " [\n",
422
+ " \"RegionID\",\n",
423
+ " \"SizeRank\",\n",
424
+ " \"RegionName\",\n",
425
+ " \"RegionType\",\n",
426
+ " \"StateName\",\n",
427
+ " \"Home Type\",\n",
428
+ " \"Date\",\n",
429
+ " ],\n",
430
+ ")\n",
431
  "\n",
432
  "\n",
433
  "# iterate over rows of combined df and coalesce column values across columns that start with \"Median Sale Price\"\n",
434
  "columns_to_coalesce = [\"Rent (Smoothed)\", \"Rent (Smoothed) (Seasonally Adjusted)\"]\n",
435
  "\n",
436
+ "combined_df = coalesce_columns(combined_df, columns_to_coalesce)\n",
 
437
  "\n",
438
  "combined_df"
439
  ]
440
  },
441
  {
442
  "cell_type": "code",
443
+ "execution_count": 5,
444
  "metadata": {},
445
  "outputs": [
446
  {
 
729
  "[1258740 rows x 14 columns]"
730
  ]
731
  },
732
+ "execution_count": 5,
733
  "metadata": {},
734
  "output_type": "execute_result"
735
  }
 
753
  },
754
  {
755
  "cell_type": "code",
756
+ "execution_count": 6,
757
  "metadata": {},
758
  "outputs": [
759
  {
 
1042
  "[1258740 rows x 14 columns]"
1043
  ]
1044
  },
1045
+ "execution_count": 6,
1046
  "metadata": {},
1047
  "output_type": "execute_result"
1048
  }
1049
  ],
1050
  "source": [
1051
+ "# Adjust column names\n",
1052
  "final_df = final_df.rename(\n",
1053
  " columns={\n",
1054
  " \"RegionID\": \"Region ID\",\n",
 
1065
  },
1066
  {
1067
  "cell_type": "code",
1068
+ "execution_count": 7,
1069
  "metadata": {},
1070
  "outputs": [],
1071
  "source": [
1072
+ "save_final_df_as_jsonl(FULL_PROCESSED_DIR_PATH, final_df)"
1073
  ]
1074
  }
1075
  ],
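
`helpers.get_df` replaces the per-metric melt blocks deleted above; judging from the new call sites, it now derives the "(Smoothed)" and "(Seasonally Adjusted)" qualifiers from the filename itself rather than taking them as booleans. A sketch under that assumption:

```python
import pandas as pd


def get_df(df, exclude_columns, columns_to_pivot, col_name, filename):
    # Zillow's file names carry "_sm_" (smoothed) and "_sa_" (seasonally
    # adjusted) slugs; fold them into the value column's name, then melt
    # the wide per-date columns into (Date, value) rows.
    if "_sm_" in filename:
        col_name += " (Smoothed)"
    if "_sa_" in filename:
        col_name += " (Seasonally Adjusted)"
    return pd.melt(
        df,
        id_vars=exclude_columns,
        value_vars=columns_to_pivot,
        var_name="Date",
        value_name=col_name,
    )
```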
processors/sales.ipynb CHANGED
@@ -2,17 +2,19 @@
2
  "cells": [
3
  {
4
  "cell_type": "code",
5
- "execution_count": 2,
6
  "metadata": {},
7
  "outputs": [],
8
  "source": [
9
  "import pandas as pd\n",
10
- "import os"
11
  ]
12
  },
13
  {
14
  "cell_type": "code",
15
- "execution_count": 3,
16
  "metadata": {},
17
  "outputs": [],
18
  "source": [
@@ -25,7 +27,7 @@
25
  },
26
  {
27
  "cell_type": "code",
28
- "execution_count": 5,
29
  "metadata": {},
30
  "outputs": [
31
  {
@@ -460,7 +462,7 @@
460
  "[504608 rows x 19 columns]"
461
  ]
462
  },
463
- "execution_count": 5,
464
  "metadata": {},
465
  "output_type": "execute_result"
466
  }
@@ -477,6 +479,15 @@
477
  " \"Home Type\",\n",
478
  "]\n",
479
  "\n",
 
  "data_frames = []\n",
481
  "\n",
482
  "for filename in os.listdir(FULL_DATA_DIR_PATH):\n",
@@ -496,131 +507,34 @@
496
  " # Identify columns to pivot\n",
497
  " columns_to_pivot = [col for col in cur_df.columns if col not in exclude_columns]\n",
498
  "\n",
499
- " smoothed = \"_sm_\" in filename\n",
500
- " seasonally_adjusted = \"_sa_\" in filename\n",
501
- "\n",
502
- " if \"_median_sale_to_list_\" in filename:\n",
503
- " col_name = \"Median Sale to List Ratio\"\n",
504
- " if smoothed:\n",
505
- " col_name += \" (Smoothed)\"\n",
506
- " if seasonally_adjusted:\n",
507
- " col_name += \" (Seasonally Adjusted)\"\n",
508
- "\n",
509
- " cur_df = pd.melt(\n",
510
- " cur_df,\n",
511
- " id_vars=exclude_columns,\n",
512
- " value_vars=columns_to_pivot,\n",
513
- " var_name=\"Date\",\n",
514
- " value_name=col_name,\n",
515
- " )\n",
516
- "\n",
517
- " elif \"_mean_sale_to_list_\" in filename:\n",
518
- " col_name = \"Mean Sale to List Ratio\"\n",
519
- " if smoothed:\n",
520
- " col_name += \" (Smoothed)\"\n",
521
- " if seasonally_adjusted:\n",
522
- " col_name += \" (Seasonally Adjusted)\"\n",
523
- "\n",
524
- " cur_df = pd.melt(\n",
525
- " cur_df,\n",
526
- " id_vars=exclude_columns,\n",
527
- " value_vars=columns_to_pivot,\n",
528
- " var_name=\"Date\",\n",
529
- " value_name=col_name,\n",
530
- " )\n",
531
- "\n",
532
- " elif \"_median_sale_price_\" in filename:\n",
533
- " col_name = \"Median Sale Price\"\n",
534
- " if smoothed:\n",
535
- " col_name += \" (Smoothed)\"\n",
536
- " if seasonally_adjusted:\n",
537
- " col_name += \" (Seasonally Adjusted)\"\n",
538
- "\n",
539
- " cur_df = pd.melt(\n",
540
- " cur_df,\n",
541
- " id_vars=exclude_columns,\n",
542
- " value_vars=columns_to_pivot,\n",
543
- " var_name=\"Date\",\n",
544
- " value_name=col_name,\n",
545
- " )\n",
546
  "\n",
547
- " elif \"_pct_sold_above_list_\" in filename:\n",
548
- " col_name = \"% Sold Above List\"\n",
549
- " if smoothed:\n",
550
- " col_name += \" (Smoothed)\"\n",
551
- " if seasonally_adjusted:\n",
552
- " col_name += \" (Seasonally Adjusted)\"\n",
553
  "\n",
554
- " cur_df = pd.melt(\n",
555
- " cur_df,\n",
556
- " id_vars=exclude_columns,\n",
557
- " value_vars=columns_to_pivot,\n",
558
- " var_name=\"Date\",\n",
559
- " value_name=col_name,\n",
560
- " )\n",
561
- "\n",
562
- " elif \"_pct_sold_below_list_\" in filename:\n",
563
- " col_name = \"% Sold Below List\"\n",
564
- " if smoothed:\n",
565
- " col_name += \" (Smoothed)\"\n",
566
- " if seasonally_adjusted:\n",
567
- " col_name += \" (Seasonally Adjusted)\"\n",
568
- "\n",
569
- " cur_df = pd.melt(\n",
570
- " cur_df,\n",
571
- " id_vars=exclude_columns,\n",
572
- " value_vars=columns_to_pivot,\n",
573
- " var_name=\"Date\",\n",
574
- " value_name=col_name,\n",
575
- " )\n",
576
- "\n",
577
- " elif \"_sales_count_now_\" in filename:\n",
578
- " col_name = \"Nowcast\"\n",
579
- " if smoothed:\n",
580
- " col_name += \" (Smoothed)\"\n",
581
- " if seasonally_adjusted:\n",
582
- " col_name += \" (Seasonally Adjusted)\"\n",
583
- "\n",
584
- " cur_df = pd.melt(\n",
585
- " cur_df,\n",
586
- " id_vars=exclude_columns,\n",
587
- " value_vars=columns_to_pivot,\n",
588
- " var_name=\"Date\",\n",
589
- " value_name=col_name,\n",
590
- " )\n",
591
- "\n",
592
- " data_frames.append(cur_df)\n",
593
- "\n",
594
- "\n",
595
- "def get_combined_df(data_frames):\n",
596
- " combined_df = None\n",
597
- " if len(data_frames) > 1:\n",
598
- " # iterate over dataframes and merge or concat\n",
599
- " combined_df = data_frames[0]\n",
600
- " for i in range(1, len(data_frames)):\n",
601
- " cur_df = data_frames[i]\n",
602
- " combined_df = pd.merge(\n",
603
- " combined_df,\n",
604
- " cur_df,\n",
605
- " on=[\n",
606
- " \"RegionID\",\n",
607
- " \"SizeRank\",\n",
608
- " \"RegionName\",\n",
609
- " \"RegionType\",\n",
610
- " \"StateName\",\n",
611
- " \"Home Type\",\n",
612
- " \"Date\",\n",
613
- " ],\n",
614
- " how=\"outer\",\n",
615
- " suffixes=(\"\", \"_\" + str(i)),\n",
616
- " )\n",
617
- " elif len(data_frames) == 1:\n",
618
- " combined_df = data_frames[0]\n",
619
- "\n",
620
- " return combined_df\n",
621
  "\n",
622
  "\n",
623
- "combined_df = get_combined_df(data_frames)\n",
624
  "# iterate over rows of combined df and coalesce column values across columns that start with \"Median Sale Price\"\n",
625
  "columns_to_coalesce = [\n",
626
  " \"Mean Sale to List Ratio (Smoothed)\"\n",
@@ -637,22 +551,14 @@
637
  " \"% Sold Above List (Smoothed)\",\n",
638
  "]\n",
639
  "\n",
640
- "for index, row in combined_df.iterrows():\n",
641
- " for col in combined_df.columns:\n",
642
- " for column_to_coalesce in columns_to_coalesce:\n",
643
- " if column_to_coalesce in col and \"_\" in col:\n",
644
- " if not pd.isna(row[col]):\n",
645
- " combined_df.at[index, column_to_coalesce] = row[col]\n",
646
- "\n",
647
- "# remove columns with underscores\n",
648
- "combined_df = combined_df[[col for col in combined_df.columns if \"_\" not in col]]\n",
649
  "\n",
650
  "combined_df"
651
  ]
652
  },
653
  {
654
  "cell_type": "code",
655
- "execution_count": 6,
656
  "metadata": {},
657
  "outputs": [
658
  {
@@ -1053,14 +959,14 @@
1053
  "[504608 rows x 19 columns]"
1054
  ]
1055
  },
1056
- "execution_count": 6,
1057
  "metadata": {},
1058
  "output_type": "execute_result"
1059
  }
1060
  ],
1061
  "source": [
1062
- "final_df = combined_df\n",
1063
- "final_df = final_df.rename(\n",
1064
  " columns={\n",
1065
  " \"RegionID\": \"Region ID\",\n",
1066
  " \"SizeRank\": \"Size Rank\",\n",
@@ -1075,14 +981,11 @@
1075
  },
1076
  {
1077
  "cell_type": "code",
1078
- "execution_count": 7,
1079
  "metadata": {},
1080
  "outputs": [],
1081
  "source": [
1082
- "if not os.path.exists(FULL_PROCESSED_DIR_PATH):\n",
1083
- " os.makedirs(FULL_PROCESSED_DIR_PATH)\n",
1084
- "\n",
1085
- "final_df.to_json(FULL_PROCESSED_DIR_PATH + \"final.jsonl\", orient=\"records\", lines=True)"
1086
  ]
1087
  }
1088
  ],
 
2
  "cells": [
3
  {
4
  "cell_type": "code",
5
+ "execution_count": 1,
6
  "metadata": {},
7
  "outputs": [],
8
  "source": [
9
  "import pandas as pd\n",
10
+ "import os\n",
11
+ "\n",
12
+ "from helpers import get_combined_df, coalesce_columns, get_df, save_final_df_as_jsonl"
13
  ]
14
  },
15
  {
16
  "cell_type": "code",
17
+ "execution_count": 2,
18
  "metadata": {},
19
  "outputs": [],
20
  "source": [
 
27
  },
28
  {
29
  "cell_type": "code",
30
+ "execution_count": 3,
31
  "metadata": {},
32
  "outputs": [
33
  {
 
462
  "[504608 rows x 19 columns]"
463
  ]
464
  },
465
+ "execution_count": 3,
466
  "metadata": {},
467
  "output_type": "execute_result"
468
  }
 
479
  " \"Home Type\",\n",
480
  "]\n",
481
  "\n",
482
+ "slug_column_mappings = {\n",
483
+ " \"_median_sale_to_list_\": \"Median Sale to List Ratio\",\n",
484
+ " \"_mean_sale_to_list_\": \"Mean Sale to List Ratio\",\n",
485
+ " \"_median_sale_price_\": \"Median Sale Price\",\n",
486
+ " \"_pct_sold_above_list_\": \"% Sold Above List\",\n",
487
+ " \"_pct_sold_below_list_\": \"% Sold Below List\",\n",
488
+ " \"_sales_count_now_\": \"Nowcast\",\n",
489
+ "}\n",
490
+ "\n",
491
  "data_frames = []\n",
492
  "\n",
493
  "for filename in os.listdir(FULL_DATA_DIR_PATH):\n",
 
507
  " # Identify columns to pivot\n",
508
  " columns_to_pivot = [col for col in cur_df.columns if col not in exclude_columns]\n",
509
  "\n",
510
+ " # iterate over slug column mappings and get df\n",
511
+ " for slug, col_name in slug_column_mappings.items():\n",
512
+ " if slug in filename:\n",
513
+ " cur_df = get_df(\n",
514
+ " cur_df,\n",
515
+ " exclude_columns,\n",
516
+ " columns_to_pivot,\n",
517
+ " col_name,\n",
518
+ " filename,\n",
519
+ " )\n",
520
  "\n",
521
+ " data_frames.append(cur_df)\n",
522
+ " break\n",
523
  "\n",
524
  "\n",
525
+ "combined_df = get_combined_df(\n",
526
+ " data_frames,\n",
527
+ " [\n",
528
+ " \"RegionID\",\n",
529
+ " \"SizeRank\",\n",
530
+ " \"RegionName\",\n",
531
+ " \"RegionType\",\n",
532
+ " \"StateName\",\n",
533
+ " \"Home Type\",\n",
534
+ " \"Date\",\n",
535
+ " ],\n",
536
+ ")\n",
537
  "\n",
 
538
  "# iterate over rows of combined df and coalesce column values across columns that start with \"Median Sale Price\"\n",
539
  "columns_to_coalesce = [\n",
540
  " \"Mean Sale to List Ratio (Smoothed)\"\n",
 
551
  " \"% Sold Above List (Smoothed)\",\n",
552
  "]\n",
553
  "\n",
554
+ "combined_df = coalesce_columns(combined_df, columns_to_coalesce)\n",
555
  "\n",
556
  "combined_df"
557
  ]
558
  },
559
  {
560
  "cell_type": "code",
561
+ "execution_count": 4,
562
  "metadata": {},
563
  "outputs": [
564
  {
 
959
  "[504608 rows x 19 columns]"
960
  ]
961
  },
962
+ "execution_count": 4,
963
  "metadata": {},
964
  "output_type": "execute_result"
965
  }
966
  ],
967
  "source": [
968
+ "# Adjust column names\n",
969
+ "final_df = combined_df.rename(\n",
970
  " columns={\n",
971
  " \"RegionID\": \"Region ID\",\n",
972
  " \"SizeRank\": \"Size Rank\",\n",
 
981
  },
982
  {
983
  "cell_type": "code",
984
+ "execution_count": 5,
985
  "metadata": {},
986
  "outputs": [],
987
  "source": [
988
+ "save_final_df_as_jsonl(FULL_PROCESSED_DIR_PATH, final_df)"
989
  ]
990
  }
991
  ],
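
Finally, `helpers.save_final_df_as_jsonl` presumably wraps the three-line save boilerplate removed from every processor; a sketch reconstructed from that deleted code:

```python
import os


def save_final_df_as_jsonl(processed_dir_path, final_df):
    # Create the processed-output directory if needed, then write one JSON
    # record per line (the final.jsonl files added in this commit).
    if not os.path.exists(processed_dir_path):
        os.makedirs(processed_dir_path)
    final_df.to_json(
        processed_dir_path + "final.jsonl", orient="records", lines=True
    )
```

With these four helpers, each processor reduces to the same pipeline: melt each CSV via `get_df` (driven in sales.ipynb by the new `slug_column_mappings` table), outer-merge with `get_combined_df`, fold duplicates with `coalesce_columns`, rename columns, and write out with `save_final_df_as_jsonl`.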