xx103 committed
Commit c53ad97
1 Parent(s): 7bfa048

Upload data_processing.py

Files changed (1)
  1. data_processing.py +154 -0
data_processing.py ADDED
import pandas as pd
from uszipcode import SearchEngine
from datasets import load_dataset

# dataset path to the NYC_collisions_data JSON
dataset_path = "xx103/NYC_Motor_Vehicle_Collisions_and_Weather_Dataset"
data_files1 = ["df1_NYC_collisions_data.json"]

dataset1 = load_dataset(path=dataset_path, data_files=data_files1)

# Convert to a pandas DataFrame
df = dataset1['train'].to_pandas()

# Convert 'CRASH TIME' to datetime so the hour can be extracted
df['CRASH TIME'] = pd.to_datetime(df['CRASH TIME'], format='%H:%M')

# Derive 'CRASH TIME PERIOD', 'CONTRIBUTING FACTOR VEHICLES', 'VEHICLE TYPES',
# 'STREET NAME' and 'STREET TYPE' for each row
for index, row in df.iterrows():
    # Bucket the crash hour into a three-hour window, e.g. 14:25 -> '12:00-14:59'
    hour = row['CRASH TIME'].hour
    period_start = (hour // 3) * 3
    period_end = period_start + 2
    df.at[index, 'CRASH TIME PERIOD'] = f"{period_start:02d}:00-{period_end:02d}:59"

    # Collect the non-empty contributing factors for vehicles 1-5
    # (pd.notna guards against NaN entries, which are truthy and would break join)
    factors = [row[f'CONTRIBUTING FACTOR VEHICLE {i}'] for i in range(1, 6)
               if pd.notna(row.get(f'CONTRIBUTING FACTOR VEHICLE {i}'))
               and row.get(f'CONTRIBUTING FACTOR VEHICLE {i}') != '']
    df.at[index, 'CONTRIBUTING FACTOR VEHICLES'] = ', '.join(factors)

    # Collect the non-empty vehicle type codes for vehicles 1-5
    vehicle_types = [row[f'VEHICLE TYPE CODE {i}'] for i in range(1, 6)
                     if pd.notna(row.get(f'VEHICLE TYPE CODE {i}'))
                     and row.get(f'VEHICLE TYPE CODE {i}') != '']
    df.at[index, 'VEHICLE TYPES'] = ', '.join(vehicle_types)

    street_names = []
    street_types = []

    # Check and append 'ON STREET NAME'
    if pd.notna(row['ON STREET NAME']) and row['ON STREET NAME'] != '':
        street_names.append(row['ON STREET NAME'])
        street_types.append('ON STREET')

    # Check and append 'CROSS STREET NAME'
    if pd.notna(row['CROSS STREET NAME']) and row['CROSS STREET NAME'] != '':
        street_names.append(row['CROSS STREET NAME'])
        street_types.append('CROSS STREET')

    # Check and append 'OFF STREET NAME'
    if pd.notna(row['OFF STREET NAME']) and row['OFF STREET NAME'] != '':
        street_names.append(row['OFF STREET NAME'])
        street_types.append('OFF STREET')

    # Join the names and types with a comma
    df.at[index, 'STREET NAME'] = ', '.join(street_names)
    df.at[index, 'STREET TYPE'] = ', '.join(street_types)

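# Note: an equivalent vectorized sketch of the time-period bucketing above
# (illustrative, not part of the original upload) would avoid the per-row loop:
#   starts = (df['CRASH TIME'].dt.hour // 3) * 3
#   df['CRASH TIME PERIOD'] = starts.map(lambda s: f"{s:02d}:00-{s + 2:02d}:59")
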
# Convert specific columns to numeric
numeric_columns = ['NUMBER OF PERSONS INJURED', 'NUMBER OF PEDESTRIANS INJURED',
                   'NUMBER OF CYCLIST INJURED', 'NUMBER OF MOTORIST INJURED',
                   'NUMBER OF PERSONS KILLED', 'NUMBER OF PEDESTRIANS KILLED',
                   'NUMBER OF CYCLIST KILLED', 'NUMBER OF MOTORIST KILLED']
for column in numeric_columns:
    df[column] = pd.to_numeric(df[column], errors='coerce').fillna(0).astype(int)

# Add new columns 'NUMBER OF INJURIES' and 'NUMBER OF DEATHS'
df['NUMBER OF INJURIES'] = (df['NUMBER OF PERSONS INJURED'] + df['NUMBER OF PEDESTRIANS INJURED']
                            + df['NUMBER OF CYCLIST INJURED'] + df['NUMBER OF MOTORIST INJURED'])
df['NUMBER OF DEATHS'] = (df['NUMBER OF PERSONS KILLED'] + df['NUMBER OF PEDESTRIANS KILLED']
                          + df['NUMBER OF CYCLIST KILLED'] + df['NUMBER OF MOTORIST KILLED'])

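# For reference: errors='coerce' turns unparseable values into NaN, which
# fillna(0) then zeroes out, e.g. (illustrative)
#   pd.to_numeric(pd.Series(['2', '', None]), errors='coerce')  # 2.0, NaN, NaN
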
# Keep only the necessary columns
columns_to_keep = [
    'CRASH DATE', 'BOROUGH', 'ZIP CODE', 'LATITUDE', 'LONGITUDE', 'COLLISION_ID',
    'CRASH TIME PERIOD', 'CONTRIBUTING FACTOR VEHICLES', 'VEHICLE TYPES',
    'NUMBER OF INJURIES', 'NUMBER OF DEATHS', 'STREET NAME', 'STREET TYPE'
]
df = df[columns_to_keep]

# Create a SearchEngine object for ZIP code lookups
search = SearchEngine()

# Convert 'LATITUDE' and 'LONGITUDE' to floats
df['LATITUDE'] = pd.to_numeric(df['LATITUDE'], errors='coerce')
df['LONGITUDE'] = pd.to_numeric(df['LONGITUDE'], errors='coerce')

# Fill in a missing 'ZIP CODE' when the row has valid 'LATITUDE' and 'LONGITUDE'
for index, row in df.iterrows():
    # 'ZIP CODE' must be an empty string and both coordinates non-null and non-zero
    if (row['ZIP CODE'] == ''
            and not (pd.isna(row['LATITUDE']) or row['LATITUDE'] == 0)
            and not (pd.isna(row['LONGITUDE']) or row['LONGITUDE'] == 0)):
        result = search.by_coordinates(lat=row['LATITUDE'], lng=row['LONGITUDE'], returns=1)
        if result:
            # Set the 'ZIP CODE' to the nearest match
            df.at[index, 'ZIP CODE'] = result[0].zipcode

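# For reference (a sketch with made-up coordinates): by_coordinates searches
# within a default 25-mile radius, and result[0] is taken as the closest match:
#   search.by_coordinates(lat=40.7128, lng=-74.0060, returns=1)[0].zipcode
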
# dataset path to the NYC_borough_data JSON
dataset_path = "xx103/NYC_Motor_Vehicle_Collisions_and_Weather_Dataset"
data_files2 = ["df2_NYC_borough_data.json"]

dataset2 = load_dataset(path=dataset_path, data_files=data_files2)

# Convert to a pandas DataFrame
df2 = dataset2['train'].to_pandas()

# Convert the 'Borough' column to uppercase to match df's 'BOROUGH' values
df2['Borough'] = df2['Borough'].str.upper()

# Create a mapping dictionary from ZIP Code to Borough
zip_to_borough = df2.set_index('ZIP Code')['Borough'].to_dict()

# Fill in a missing BOROUGH from the row's ZIP CODE
def update_borough(row):
    if pd.isna(row['BOROUGH']) or row['BOROUGH'] == '':
        return zip_to_borough.get(row['ZIP CODE'], row['BOROUGH'])
    else:
        return row['BOROUGH']

# Apply the function to each row in df
df['BOROUGH'] = df.apply(update_borough, axis=1)

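# Illustrative example (assumed values): if df2 has a row with
# 'ZIP Code' = '10007' and 'Borough' = 'Manhattan', the dict maps
# '10007' -> 'MANHATTAN', so a collision row with a blank BOROUGH and that
# ZIP CODE gets filled in; ZIP codes absent from the dict leave BOROUGH unchanged.
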
# dataset path to the NYC_weather_data JSON
dataset_path = "xx103/NYC_Motor_Vehicle_Collisions_and_Weather_Dataset"
data_files3 = ["df3_NYC_weather_data.json"]

dataset3 = load_dataset(path=dataset_path, data_files=data_files3)

# Convert to a pandas DataFrame
df3 = dataset3['train'].to_pandas()

# Keep only the specified columns
df3 = df3[['datetime', 'description', 'precip', 'preciptype', 'tempmax', 'tempmin']]

# Rename the columns
df3.rename(columns={
    'description': 'WEATHER DESCRIPTION',
    'precip': 'PRECIPITATION',
    'preciptype': 'PRECIPITATION TYPE',
    'tempmax': 'TEMPMAX',
    'tempmin': 'TEMPMIN'
}, inplace=True)

# Convert 'CRASH DATE' to a date (drop the time component)
df['CRASH DATE'] = pd.to_datetime(df['CRASH DATE']).dt.date

# Convert 'datetime' in df3 to a date as well so the merge keys match
df3['datetime'] = pd.to_datetime(df3['datetime']).dt.date

# Merge the weather columns onto the collision rows by date
merged_df = pd.merge(left=df, right=df3, how='left', left_on='CRASH DATE', right_on='datetime')

# Drop the 'datetime' column, which is now redundant
merged_df.drop(columns=['datetime'], inplace=True)

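# A sanity check one might add here (not in the original upload): a left join
# preserves one row per collision only if df3 has a single weather row per date.
#   assert merged_df.shape[0] == df.shape[0]
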
# Print the first row of merged_df
# print(merged_df.iloc[0])

# Specify the path to the new JSON file
# new_file_path = '/Users/suzyxie/Desktop/hugging_face_data/NYC_collisions_weather_merged_data.json'

# Save the DataFrame to a JSON file
# merged_df.to_json(new_file_path, orient='records', lines=True)
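# To load the saved file back later (a sketch, assuming new_file_path above
# is uncommented):
#   reloaded = pd.read_json(new_file_path, orient='records', lines=True)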