# -*- coding: utf-8 -*-
"""Urban_Tree_Canopy_in_Durham
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1X59zPtI7ydiX10ZnfjsNGvnKNTXgwrWs
"""
# !pip install datasets  # Colab shell command; commented out so this file stays importable as a module
import json
import zipfile

import datasets
from datasets import (
    DatasetInfo,
    DownloadManager,
    Features,
    GeneratorBasedBuilder,
    Split,
    SplitGenerator,
    Value,
)
import geopandas as gpd
import matplotlib.pyplot as plt
import pandas as pd
import plotly.express as px
import seaborn as sns


class Urban_Tree_Canopy_in_Durham(GeneratorBasedBuilder):
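    # Merges Durham tree-canopy records from three sources (a CSV table, a
    # zipped shapefile, and a zipped GeoJSON file) into a single example stream.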
# Define the `_info` method, which provides dataset metadata
def _info(self):
return DatasetInfo(
description="A description of the dataset.",
features=Features(
{
"objectid": Value("int32"),
"streetaddr": Value("string"),
"city_x": Value("string"),
"zipcode_x": Value("string"),
"facilityid_x": Value("string"),
"present_x": Value("string"),
"genus_x": Value("string"),
"species_x": Value("string"),
"commonname_x": Value("string"),
"plantingda": Value("datetime"),
"diameterin_x": Value("float"),
"heightft_x": Value("float"),
"condition_x": Value("string"),
"contractwo": Value("string"),
"neighborho": Value("string"),
"program_x": Value("string"),
"plantingw_x": Value("string"),
"plantingco": Value("string"),
"underpwerl": Value("string"),
"matureheig": Value("float"),
"globalid_x": Value("string"),
"created_us": Value("string"),
"created_da": Value("datetime"),
"last_edite": Value("string"),
"last_edi_1": Value("datetime"),
"isoprene_x": Value("float"),
"monoterpen": Value("float"),
"vocs_x": Value("float"),
"coremoved_": Value("float"),
"coremove_1": Value("float"),
"o3removed_": Value("float"),
"o3remove_1": Value("float"),
"no2removed": Value("float"),
"no2remov_1": Value("float"),
"so2removed": Value("float"),
"so2remov_1": Value("float"),
"pm10remove": Value("float"),
"pm10remo_1": Value("float"),
"pm25remove": Value("float"),
"o2producti": Value("float"),
"replaceval": Value("float"),
"carbonstor": Value("float"),
"carbonst_1": Value("float"),
"grosscarse": Value("float"),
"grosscar_1": Value("float"),
"avoidrunof": Value("float"),
"avoidrun_1": Value("float"),
"polremoved": Value("float"),
"polremov_1": Value("float"),
"totannbene": Value("float"),
"leafarea_s": Value("float"),
"potevapotr": Value("float"),
"evaporatio": Value("float"),
"transpirat": Value("float"),
"h2ointerce": Value("float"),
"avoidrunva": Value("float"),
"avoidrun_2": Value("float"),
"carbonavoi": Value("float"),
"carbonav_1": Value("float"),
"heating_mb": Value("float"),
"heating_do": Value("float"),
"heating_kw": Value("float"),
"heating__1": Value("float"),
"cooling_kw": Value("float"),
"cooling_do": Value("float"),
"totalenerg": Value("float"),
"geometry_x": Value("string"),
"x": Value("float"),
"y": Value("float"),
"streetaddress_x": Value("string"),
"city_y": Value("string"),
"zipcode_y": Value("string"),
"facilityid_y": Value("string"),
"present_y": Value("string"),
"genus_y": Value("string"),
"species_y": Value("string"),
"commonname_y": Value("string"),
"plantingdate_x": Value("datetime"),
"diameterin_y": Value("float"),
"heightft_y": Value("float"),
"condition_y": Value("string"),
"contractwork_x": Value("string"),
"neighborhood_x": Value("string"),
"program_y": Value("string"),
"plantingw_y": Value("string"),
"plantingcond_x": Value("string"),
"underpwerlins_x": Value("string"),
"matureheight_x": Value("float"),
"globalid_y": Value("string"),
"created_user_x": Value("string"),
"created_date_x": Value("datetime"),
"last_edited_user_x": Value("string"),
"last_edited_date_x": Value("datetime"),
"isoprene_y": Value("float"),
"monoterpene_x": Value("float"),
"vocs_y": Value("float"),
"coremoved_ozperyr_x": Value("float"),
"coremoved_dolperyr_x": Value("float"),
"o3removed_ozperyr_x": Value("float"),
"o3removed_dolperyr_x": Value("float"),
"no2removed_ozperyr_x": Value("float"),
"no2removed_dolperyr_x": Value("float"),
"so2removed_ozperyr_x": Value("float"),
"so2removed_dolperyr_x": Value("float"),
"pm10removed_dolperyr_y":Value("float"),
"pm25removed_ozperyr_y":Value("float"),
"o2production_lbperyr_y":Value("float"),
"replacevalue_dol_y":Value("float"),
"carbonstorage_lb_y":Value("float"),
"carbonstorage_dol_y":Value("float"),
"grosscarseq_lbperyr_y":Value("float"),
"grosscarseq_dolperyr_y":Value("float"),
"avoidrunoff_ft2peryr":Value("float"),
}
),
supervised_keys=None,
homepage="https://github.com/AuraMa111/Urban_Tree_Canopy_in_Durham",
citation="A citation or reference to the source of the dataset.",
)
    def _split_generators(self, dl_manager: DownloadManager):
        # Download the source data. Use download() rather than
        # download_and_extract() so the ZIP archives stay as .zip files,
        # which the processing helpers below open themselves.
        downloaded_files = dl_manager.download({
"csv": "https://raw.githubusercontent.com/AuraMa111/Urban_Tree_Canopy_in_Durham/main/Trees_%2526_Planting_Sites.csv",
"geojson_zip": "https://raw.githubusercontent.com/AuraMa111/Urban_Tree_Canopy_in_Durham/main/Trees_%2526_Planting_Sites.geojson.zip",
"zip": "https://raw.githubusercontent.com/AuraMa111/Urban_Tree_Canopy_in_Durham/main/TreesPlanting_Sites.zip"
})
# Return split generators
return [
            SplitGenerator(
                name=Split.TRAIN,
gen_kwargs={
"file_path_csv": downloaded_files["csv"],
"file_path_zip": downloaded_files["zip"],
"file_path_geojson_zip": downloaded_files["geojson_zip"],
},
),
# Add other splits if necessary
]
def _generate_examples(self, file_path_csv, file_path_zip, file_path_geojson_zip):
# Generate examples from CSV
csv_df = self.process_csv_file(file_path_csv)
# Generate examples from Shapefiles within ZIP
shp_gdf = self.process_zip_shapefiles(file_path_zip)
# Generate examples from GeoJSON within ZIP
geojson_gdf = self.process_zip_geojson(file_path_geojson_zip)
# Merge the DataFrames
combined_gdf = self.merge_dataframes(csv_df, shp_gdf, geojson_gdf)
# Generate final examples
for idx, example in self.generate_examples_from_merged_data(combined_gdf):
yield idx, example
def process_csv_file(self, file_path):
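        # Load the CSV, drop exact duplicates, backfill missing values, and
        # normalize the column names to lowercase snake_case.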
with open(file_path, 'r') as f:
csv_df = pd.read_csv(f)
csv_df.drop_duplicates(inplace=True)
        csv_df.bfill(inplace=True)  # backfill missing values (fillna(method=...) is deprecated)
csv_df.columns = csv_df.columns.str.lower().str.replace(' ', '_')
csv_df['objectid'] = csv_df['objectid'].astype(int)
return csv_df
    def process_zip_shapefiles(self, file_path):
        # Read the shapefile straight from the ZIP archive. A lone .shp stream
        # cannot be read by itself (its .dbf/.shx sidecar files are required),
        # so let geopandas open the whole archive via the zip:// scheme.
        shp_gdf = gpd.read_file(f"zip://{file_path}")
        shp_gdf.columns = shp_gdf.columns.str.lower().str.replace(' ', '_')
        shp_gdf['objectid'] = shp_gdf['objectid'].astype(int)
        return shp_gdf
def process_zip_geojson(self, file_path):
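        # Locate the first .geojson member in the archive and build a
        # GeoDataFrame from its features, then normalize the column names.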
with zipfile.ZipFile(file_path, 'r') as z:
for file_name in z.namelist():
if file_name.endswith(".geojson"):
with z.open(file_name) as file:
geojson_data = json.load(file)
geojson_gdf = gpd.GeoDataFrame.from_features(geojson_data['features'])
geojson_gdf.columns = geojson_gdf.columns.str.lower().str.replace(' ', '_')
geojson_gdf['objectid'] = geojson_gdf['objectid'].astype(int)
return geojson_gdf
def merge_dataframes(self, csv_df, shp_gdf, geojson_gdf):
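        # Inner-join the shapefile and CSV records on 'objectid', then
        # left-join the GeoJSON attributes onto the result.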
combined_gdf = shp_gdf.merge(csv_df, on='objectid', how='inner')
combined_gdf = combined_gdf.merge(geojson_gdf, on='objectid', how='left')
return combined_gdf
def generate_examples_from_merged_data(self, combined_gdf):
for idx, row in combined_gdf.iterrows():
example = row.to_dict()
if 'geometry' in row and row['geometry'] is not None:
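                # Serialize the shapely geometry into a plain GeoJSON dict so
                # every value in the example is JSON-friendly.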
example['geometry'] = json.loads(gpd.GeoSeries([row['geometry']]).to_json())['features'][0]['geometry']
yield idx, example
def plot_spatial_distribution(self, gdf, lat_col='latitude', lon_col='longitude', color_col='species', hover_col='species'):
"""
Visualize the spatial distribution of the data using Plotly.
Parameters:
- gdf: GeoDataFrame to be visualized.
- lat_col: String, name of the column with latitude values.
- lon_col: String, name of the column with longitude values.
- color_col: String, name of the column to determine the color of points.
- hover_col: String, name of the column to show when hovering over points.
"""
center_lat = gdf[lat_col].mean()
center_lon = gdf[lon_col].mean()
fig = px.scatter_mapbox(gdf,
lat=lat_col,
lon=lon_col,
color=color_col,
hover_name=hover_col,
center={"lat": center_lat, "lon": center_lon},
zoom=10,
height=600,
width=800)
fig.update_layout(mapbox_style="open-street-map")
fig.show()
def plot_correlation_heatmap(self, gdf, columns, figsize=(10, 8), cmap='coolwarm'):
"""
Plot a heatmap of the correlation matrix for selected columns in the GeoDataFrame.
Parameters:
- gdf: GeoDataFrame containing the data.
- columns: List of columns to include in the correlation matrix.
- figsize: Tuple of figure size dimensions (width, height).
- cmap: Colormap for the heatmap.
"""
# Select only the columns with environmental data
env_data = gdf[columns]
# Compute the correlation matrix
corr = env_data.corr()
# Set up the matplotlib figure
plt.figure(figsize=figsize)
# Generate a heatmap
sns.heatmap(corr, annot=True, fmt=".2f", cmap=cmap, square=True, linewidths=.5, cbar_kws={"shrink": .5})
# Optional: Adjust the layout
plt.tight_layout()
# Show the plot
plt.show()
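    # Example use of plot_correlation_heatmap (a hedged sketch: `combined_gdf`
    # stands for the merged GeoDataFrame returned by merge_dataframes above,
    # and the columns are a few numeric fields from the feature schema):
    #
    # builder.plot_correlation_heatmap(
    #     combined_gdf,
    #     columns=['isoprene_x', 'vocs_x', 'totannbene', 'leafarea_s'],
    # )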
# Usage example:
# builder = Urban_Tree_Canopy_in_Durham()
# for key, example in builder._generate_examples(csv_path, zip_path, geojson_zip_path):
#     ...  # do something with key and example
# combined_gdf = builder.merge_dataframes(csv_df, shp_gdf, geojson_gdf)
# builder.plot_spatial_distribution(combined_gdf, lat_col='y', lon_col='x', color_col='species_x', hover_col='species_x')
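
# A minimal end-to-end sketch. The repository id below is an assumption based
# on the hosting page (owner "Ziyuan111"); adjust it if the script lives
# elsewhere. Recent `datasets` releases require trust_remote_code=True before
# executing a community loading script such as this one.
if __name__ == "__main__":
    dataset = datasets.load_dataset(
        "Ziyuan111/Urban_Tree_Canopy_in_Durham",  # assumed repo id
        split="train",
        trust_remote_code=True,
    )
    print(dataset)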