'''
Install GeoPandas from source before running this script:

git clone https://github.com/geopandas/geopandas.git
cd geopandas
pip install .
'''
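# The released version can also be installed directly from PyPI: pip install geopandas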
import requests
import pandas as pd
import numpy as np
import geopandas as gpd
from shapely.geometry import Point

# Load the neighborhood GeoJSON file and the housing permit dataset
neighborhood = gpd.read_file("https://raw.githubusercontent.com/HathawayLiu/Housing_dataset/main/Neighborhood_Map_Atlas_Districts.geojson")
url = "https://github.com/HathawayLiu/Housing_dataset/raw/main/Building_Permits_20240213.csv"
df = pd.read_csv(url)

# Pre-processing of data
# Zip codes: coerce to numeric, treat missing or zero values as 'NA', and store as clean strings
df['OriginalZip'] = pd.to_numeric(df['OriginalZip'], errors='coerce').replace(0, np.nan)
df['OriginalZip'] = df['OriginalZip'].apply(lambda z: 'NA' if pd.isna(z) else str(int(z)))
df['OriginalCity'] = df['OriginalCity'].fillna('SEATTLE')
df['OriginalState'] = df['OriginalState'].fillna('WA')
df['EstProjectCost'] = pd.to_numeric(df['EstProjectCost'], errors='coerce').astype(float)
df['IssuedDate'] = pd.to_datetime(df['IssuedDate'], errors='coerce')
df['HousingUnits'] = pd.to_numeric(df['HousingUnits'], errors='coerce').fillna(0).astype(int)
df['HousingUnitsRemoved'] = pd.to_numeric(df['HousingUnitsRemoved'], errors='coerce').fillna(0).astype(int)
df['HousingUnitsAdded'] = pd.to_numeric(df['HousingUnitsAdded'], errors='coerce').fillna(0).astype(int)
df['Longitude'] = pd.to_numeric(df['Longitude'], errors='coerce')
df['Latitude'] = pd.to_numeric(df['Latitude'], errors='coerce')
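
# Optional sanity check: confirm the coerced dtypes and count rows missing coordinates
# (uncomment to run; purely diagnostic, not required by the pipeline below).
# print(df[['EstProjectCost', 'IssuedDate', 'Longitude', 'Latitude']].dtypes)
# print(df[['Longitude', 'Latitude']].isna().sum())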

# Function to get the zip code from coordinates
def get_zip_code_from_coordinates(latitude, longitude, api_key):
    if pd.isna(latitude) or pd.isna(longitude):
        return 'NA'  # Return 'NA' if latitude or longitude is NaN

    api_url = f"https://maps.googleapis.com/maps/api/geocode/json?latlng={latitude},{longitude}&key={api_key}"
    response = requests.get(api_url, timeout=10)  # Avoid hanging indefinitely on a slow response
    
    if response.status_code == 200:
        data = response.json()
        if data['results']:
            for component in data['results'][0]['address_components']:
                if 'postal_code' in component['types']:
                    return component['long_name']
        return 'NA'  # Return 'NA' if no zip code found
    else:
        return 'NA'  # Return 'NA' for non-200 responses
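
# Example usage (hypothetical coordinates near downtown Seattle; the exact value returned
# depends on the Google Geocoding API response):
# get_zip_code_from_coordinates(47.6062, -122.3321, api_key)  # -> e.g. '98101'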

# Apply the function only to rows where 'OriginalZip' is 'NA'
api_key = 'Your Own API Key' 
for index, row in df.iterrows():
    if row['OriginalZip'] == 'NA':
        zip_code = get_zip_code_from_coordinates(row['Latitude'], row['Longitude'], api_key)
        df.at[index, 'OriginalZip'] = zip_code
        print(f"Updated row {index} with Zip Code: {zip_code}")
        
# Build a GeoDataFrame of permit locations so each point can be tested against the neighborhood polygons
gdf = gpd.GeoDataFrame(df, geometry=gpd.points_from_xy(df.Longitude, df.Latitude), crs='EPSG:4326')

# Function to get the corresponding neighborhood district for a point
def get_neighborhood_name(point, neighborhoods):
    if point is None or point.is_empty or np.isnan(point.x) or np.isnan(point.y):
        return 'NA'  # No usable coordinates for this permit
    for _, row in neighborhoods.iterrows():
        if point.within(row['geometry']):
            return row['L_HOOD']
    return 'NA'  # Point does not fall inside any mapped neighborhood

# Apply the function to each permit location
gdf['NeighborDistrict'] = gdf['geometry'].apply(lambda pt: get_neighborhood_name(pt, neighborhood))
# Merge the new column back to the original DataFrame
df['NeighborDistrict'] = gdf['NeighborDistrict']
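
# A vectorized alternative (sketch only, not the approach used above): geopandas' spatial
# join can assign districts in one call; grouping by the original index guards against
# points that fall inside overlapping polygons.
# joined = gpd.sjoin(gdf, neighborhood[['L_HOOD', 'geometry']], how='left', predicate='within')
# gdf['NeighborDistrict'] = joined.groupby(level=0)['L_HOOD'].first().fillna('NA')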
# Keep only permits issued from the year 2000 onward
df_filtered = df[df['IssuedDate'].dt.year >= 2000].copy()
df_filtered['IssuedDate'] = df_filtered['IssuedDate'].astype(str)
df_filtered.fillna('NA', inplace=True)
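
# Assumed export step: the train/test split below reads the uploaded copy of this cleaned
# file ('Building_Permits_Cleaned.csv') from GitHub, so the cleaned DataFrame is presumably
# written out along these lines:
# df_filtered.to_csv('Building_Permits_Cleaned.csv', index=False)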

'''
The following code splits the dataset into training and test sets.
'''
# Read the dataset
housing_df = pd.read_csv('https://github.com/HathawayLiu/Housing_dataset/raw/main/Building_Permits_Cleaned.csv')
# Shuffle the dataset (a fixed random_state makes the split reproducible)
housing_df = housing_df.sample(frac=1, random_state=42).reset_index(drop=True)

# Splitting the dataset into training and test sets
split_ratio = 0.8  # 80% for training, 20% for testing
split_index = int(len(housing_df) * split_ratio)

train_df = housing_df[:split_index]
test_df = housing_df[split_index:]

# Export to CSV
train_df.to_csv('/Users/hathawayliu/Desktop/train_dataset.csv', index=False)
test_df.to_csv('/Users/hathawayliu/Desktop/test_dataset.csv', index=False)
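
# Optional sanity check: the two pieces should cover the full shuffled dataset
# (uncomment to verify; purely diagnostic).
# print(len(train_df), len(test_df), len(housing_df))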