xx103 committed on
Commit
7bff708
1 Parent(s): 3ca93aa

Upload data_processing_final.py

Browse files
Files changed (1) hide show
  1. data_processing_final.py +148 -0
data_processing_final.py ADDED
@@ -0,0 +1,148 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from botocore import UNSIGNED
2
+ from botocore.client import Config
3
+ import pandas as pd
4
+ import boto3
5
+ from datetime import datetime
6
+ from uszipcode import SearchEngine
7
+ import numpy as np
8
+
9
+
10
# Anonymous (unsigned) S3 client: the project bucket is public, so no AWS
# credentials are needed.
s3 = boto3.client('s3', config=Config(signature_version=UNSIGNED))

# Fetch the three raw JSON inputs from S3 into the working directory.
bucket_name = 'sta663project1'
file_keys = ['NYC_collisions_data.json', 'NYC_borough_data.json', 'NYC_weather_data.json']
local_file_names = ['NYC_collisions_data.json', 'NYC_borough_data.json', 'NYC_weather_data.json']

for position, file_key in enumerate(file_keys):
    s3.download_file(bucket_name, file_key, local_file_names[position])
20
+
21
# Load the downloaded files in one pass:
#   df  -> NYC collisions records
#   df2 -> borough / ZIP-code lookup table
#   df3 -> daily NYC weather observations
df, df2, df3 = (pd.read_json(name) for name in local_file_names)
25
+
26
# Parse 'CRASH TIME' (strings like "14:35") so the hour can be extracted below.
df['CRASH TIME'] = pd.to_datetime(df['CRASH TIME'], format='%H:%M')


def _present(value):
    """Return True when value is a real, non-empty entry (not None/NaN/'')."""
    return pd.notna(value) and value != ''


# Derive per-row summary columns on the collisions frame:
#   'CRASH TIME PERIOD'            - 3-hour bucket, e.g. "12:00-14:59"
#   'CONTRIBUTING FACTOR VEHICLES' - all present factors, comma-joined
#   'VEHICLE TYPES'                - all present vehicle type codes, comma-joined
#   'STREET NAME' / 'STREET TYPE'  - present street names plus which field each came from
for index, row in df.iterrows():
    hour = row['CRASH TIME'].hour
    period_start = (hour // 3) * 3
    period_end = period_start + 2
    df.at[index, 'CRASH TIME PERIOD'] = f"{period_start:02d}:00-{period_end:02d}:59"

    # BUG FIX: the original used `if row.get(key)`, relying on truthiness.
    # NaN is truthy in Python, so missing values stored as NaN slipped
    # through and broke ', '.join(...) (floats are not strings). Filter
    # explicitly with the same not-NaN / not-empty test used for streets.
    factors = [row[f'CONTRIBUTING FACTOR VEHICLE {i}'] for i in range(1, 6)
               if _present(row.get(f'CONTRIBUTING FACTOR VEHICLE {i}'))]
    df.at[index, 'CONTRIBUTING FACTOR VEHICLES'] = ', '.join(factors)

    vehicle_types = [row[f'VEHICLE TYPE CODE {i}'] for i in range(1, 6)
                     if _present(row.get(f'VEHICLE TYPE CODE {i}'))]
    df.at[index, 'VEHICLE TYPES'] = ', '.join(vehicle_types)

    # Collect whichever of the three street fields are populated, keeping
    # names and their source labels aligned.
    street_names = []
    street_types = []
    for column, label in (('ON STREET NAME', 'ON STREET'),
                          ('CROSS STREET NAME', 'CROSS STREET'),
                          ('OFF STREET NAME', 'OFF STREET')):
        if _present(row[column]):
            street_names.append(row[column])
            street_types.append(label)

    df.at[index, 'STREET NAME'] = ', '.join(street_names)
    df.at[index, 'STREET TYPE'] = ', '.join(street_types)
63
+
64
+
65
# Casualty-count columns, grouped by outcome so they can be totalled below.
injured_columns = ['NUMBER OF PERSONS INJURED', 'NUMBER OF PEDESTRIANS INJURED',
                   'NUMBER OF CYCLIST INJURED', 'NUMBER OF MOTORIST INJURED']
killed_columns = ['NUMBER OF PERSONS KILLED', 'NUMBER OF PEDESTRIANS KILLED',
                  'NUMBER OF CYCLIST KILLED', 'NUMBER OF MOTORIST KILLED']
numeric_columns = injured_columns + killed_columns

# Coerce every count to an integer; unparseable entries become 0.
for column in numeric_columns:
    df[column] = pd.to_numeric(df[column], errors='coerce').fillna(0).astype(int)

# Roll the per-category counts up into two grand totals.
df['NUMBER OF INJURIES'] = df[injured_columns].sum(axis=1)
df['NUMBER OF DEATHS'] = df[killed_columns].sum(axis=1)
74
+
75
# Trim the collisions frame down to just the columns needed for the
# borough (df2) and weather (df3) merges that follow.
columns_to_keep = [
    'CRASH DATE', 'BOROUGH', 'ZIP CODE', 'LATITUDE', 'LONGITUDE', 'COLLISION_ID',
    'CRASH TIME PERIOD', 'CONTRIBUTING FACTOR VEHICLES', 'VEHICLE TYPES',
    'NUMBER OF INJURIES', 'NUMBER OF DEATHS', 'STREET NAME', 'STREET TYPE',
]
df = df.loc[:, columns_to_keep]
82
+
83
# Reverse-geocoder used to recover missing ZIP codes from coordinates.
search = SearchEngine()

# Coordinates arrive with mixed types; coerce to floats (invalid -> NaN).
df['LATITUDE'] = pd.to_numeric(df['LATITUDE'], errors='coerce')
df['LONGITUDE'] = pd.to_numeric(df['LONGITUDE'], errors='coerce')


def _valid_coordinate(value):
    """A usable coordinate is numeric (not NaN) and non-zero."""
    return not (pd.isna(value) or value == 0)


# Back-fill missing ZIP codes by reverse-geocoding valid coordinates.
# FIX: the original treated only '' as a missing ZIP, so rows whose
# 'ZIP CODE' was NaN were silently skipped — inconsistent with the
# borough back-fill below, which treats both NaN and '' as missing.
for index, row in df.iterrows():
    zip_missing = pd.isna(row['ZIP CODE']) or row['ZIP CODE'] == ''
    if zip_missing and _valid_coordinate(row['LATITUDE']) and _valid_coordinate(row['LONGITUDE']):
        result = search.by_coordinates(lat=row['LATITUDE'], lng=row['LONGITUDE'], returns=1)
        if result:
            # returns=1 asks for a single match; use its ZIP code.
            df.at[index, 'ZIP CODE'] = result[0].zipcode
99
+
100
# Normalise borough names to upper case so they match the collisions data.
df2['Borough'] = df2['Borough'].str.upper()

# Lookup table: ZIP code -> borough name (later rows win on duplicates,
# matching the original set_index(...).to_dict() behaviour).
zip_to_borough = dict(zip(df2['ZIP Code'], df2['Borough']))
105
+
106
def update_borough(row):
    """Return the row's borough, back-filling missing values via ZIP code.

    A borough that is already populated on the row wins; otherwise the
    module-level ``zip_to_borough`` table is consulted, and the row's
    original (empty/NaN) value is returned when the ZIP code is unknown.
    """
    borough = row['BOROUGH']
    # Guard clause: keep any borough that is actually populated.
    if pd.notna(borough) and borough != '':
        return borough
    return zip_to_borough.get(row['ZIP CODE'], borough)
112
+
113
# Back-fill the BOROUGH column row by row using the ZIP-code lookup.
filled_boroughs = df.apply(update_borough, axis=1)
df['BOROUGH'] = filled_boroughs
115
+
116
# Keep only the weather fields we merge into the collisions data, and
# rename them to match the collisions frame's UPPER-CASE column style.
# FIX: the original selected a slice and then called rename(inplace=True)
# on it, which can trigger pandas' SettingWithCopyWarning; building the
# renamed frame in one chained expression avoids mutating a slice.
df3 = df3[['datetime', 'description', 'precip', 'preciptype', 'tempmax', 'tempmin']].rename(
    columns={
        'description': 'WEATHER DESCRIPTION',
        'precip': 'PRECIPITATION',
        'preciptype': 'PRECIPITATION TYPE',
        'tempmax': 'TEMPMAX',
        'tempmin': 'TEMPMIN',
    }
)
127
+
128
# Reduce both join keys to plain dates (drop any time-of-day component).
df['CRASH DATE'] = pd.to_datetime(df['CRASH DATE']).dt.date
df3['datetime'] = pd.to_datetime(df3['datetime']).dt.date

# Left-join the daily weather onto every collision that happened that day,
# then discard the weather date key, which now duplicates 'CRASH DATE'.
merged_df = df.merge(df3, how='left', left_on='CRASH DATE', right_on='datetime')
merged_df = merged_df.drop(columns=['datetime'])
139
+
140
+
141
# Stringify the date column so downstream consumers (e.g. Hugging Face
# dataset tooling) don't choke on python date objects.
merged_df['CRASH DATE'] = merged_df['CRASH DATE'].astype(str)

# Normalise empty strings to proper missing values.
merged_df = merged_df.replace('', np.nan)

# Quick sanity check: show the first merged record.
first_record = merged_df.iloc[0]
print(first_record)