#!/usr/bin/env python
# coding: utf-8
# In[1]:
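"""Build the home values (ZHVI) dataset for the "home_values" config.

Reads the raw Zillow Home Value Index CSVs, keeps only the state-level
files, tags each frame with its bedroom count and home type, combines the
frames on their shared identifier columns, and saves the result as JSONL.
"""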
import pandas as pd
import os
from helpers import (
    get_data_path_for_config,
    get_combined_df,
    save_final_df_as_jsonl,
    handle_slug_column_mappings,
    set_home_type,
)
# In[2]:
CONFIG_NAME = "home_values"
# In[3]:
data_frames = []
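# Map the tier slug embedded in each filename to a readable indicator name;
# the empty slug matches the untiered (all-homes) ZHVI files.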
slug_column_mappings = {
    "_tier_0.0_0.33_": "Bottom Tier ZHVI",
    "_tier_0.33_0.67_": "Mid Tier ZHVI",
    "_tier_0.67_1.0_": "Top Tier ZHVI",
    "": "ZHVI",
}
data_dir_path = get_data_path_for_config(CONFIG_NAME)
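# Build one cleaned frame per qualifying CSV in the config's data directory.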
for filename in os.listdir(data_dir_path):
    if not filename.endswith(".csv"):
        continue
    print("processing " + filename)
    cur_df = pd.read_csv(os.path.join(data_dir_path, filename))
    exclude_columns = [
        "RegionID",
        "SizeRank",
        "RegionName",
        "RegionType",
        "StateName",
        "Bedroom Count",
        "Home Type",
    ]
    # Only state-level files are processed for this config; skip every
    # other geography.
    if any(
        geography in filename
        for geography in ("Zip", "Neighborhood", "City", "Metro", "County")
    ):
        continue
    # Geography-specific metadata columns excluded from the value columns.
    # These branches are unreachable while the skip above is in place; they
    # are kept in case other region types are re-enabled.
    if "City" in filename:
        exclude_columns = exclude_columns + ["State", "Metro", "CountyName"]
    elif "Zip" in filename:
        exclude_columns = exclude_columns + [
            "State",
            "City",
            "Metro",
            "CountyName",
        ]
    elif "County" in filename:
        exclude_columns = exclude_columns + [
            "State",
            "Metro",
            "StateCodeFIPS",
            "MunicipalCodeFIPS",
        ]
    elif "Neighborhood" in filename:
        exclude_columns = exclude_columns + [
            "State",
            "City",
            "Metro",
            "CountyName",
        ]
    # Tag the frame with the bedroom count encoded in the filename.
    if "_bdrmcnt_1_" in filename:
        cur_df["Bedroom Count"] = "1-Bedroom"
    elif "_bdrmcnt_2_" in filename:
        cur_df["Bedroom Count"] = "2-Bedrooms"
    elif "_bdrmcnt_3_" in filename:
        cur_df["Bedroom Count"] = "3-Bedrooms"
    elif "_bdrmcnt_4_" in filename:
        cur_df["Bedroom Count"] = "4-Bedrooms"
    elif "_bdrmcnt_5_" in filename:
        cur_df["Bedroom Count"] = "5+-Bedrooms"
    else:
        cur_df["Bedroom Count"] = "All Bedrooms"
    cur_df = set_home_type(cur_df, filename)
    # Normalize identifier columns to strings before combining.
    cur_df["StateName"] = cur_df["StateName"].astype(str)
    cur_df["RegionName"] = cur_df["RegionName"].astype(str)
    data_frames = handle_slug_column_mappings(
        data_frames, slug_column_mappings, exclude_columns, filename, cur_df
    )
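# Merge the per-file frames on their shared identifier columns.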
combined_df = get_combined_df(
    data_frames,
    [
        "RegionID",
        "SizeRank",
        "RegionName",
        "RegionType",
        "StateName",
        "Bedroom Count",
        "Home Type",
        "Date",
    ],
)
combined_df
# In[4]:
final_df = combined_df
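# Copy RegionName into the geography-specific column that matches each
# row's RegionType.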
for index, row in final_df.iterrows():
    if row["RegionType"] == "city":
        final_df.at[index, "City"] = row["RegionName"]
    elif row["RegionType"] == "county":
        final_df.at[index, "County"] = row["RegionName"]
    elif row["RegionType"] == "state":
        final_df.at[index, "StateName"] = row["RegionName"]
# coalesce State and StateName columns
# final_df["State"] = final_df["State"].combine_first(final_df["StateName"])
# final_df["County"] = final_df["County"].combine_first(final_df["CountyName"])
# final_df = final_df.drop(
#     columns=[
#         "StateName",
#         # "CountyName"
#     ]
# )
final_df
# In[5]:
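# Rename the raw Zillow columns to their human-readable dataset names.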
final_df = final_df.rename(
    columns={
        "RegionID": "Region ID",
        "SizeRank": "Size Rank",
        "RegionName": "Region",
        "RegionType": "Region Type",
        "StateCodeFIPS": "State Code FIPS",
        "StateName": "State",
        "MunicipalCodeFIPS": "Municipal Code FIPS",
    }
)
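# Parse the "YYYY-MM-DD" date strings into proper datetimes.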
final_df["Date"] = pd.to_datetime(final_df["Date"], format="%Y-%m-%d")
final_df
# In[6]:
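# Write the final dataset as JSONL under this config's name.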
save_final_df_as_jsonl(CONFIG_NAME, final_df)