# pad-us / AK-HI-preprocess.py
# Carl Boettiger
# branch AK-HI, commit ef3b477
# +
import ibis
import ibis.selectors as s
from ibis import _
import fiona
import geopandas as gpd
vec_file = 'pad-AK-HI-stats.parquet'  # output file: vector attributes + zonal stats
# +
fgb = "https://data.source.coop/cboettig/pad-us-3/pad-us3-combined.fgb"
parquet = "https://data.source.coop/cboettig/pad-us-3/pad-us3-combined.parquet"
# gdb = "https://data.source.coop/cboettig/pad-us-3/PADUS3/PAD_US3_0.gdb" # original, all tables
con = ibis.duckdb.connect()
con.load_extension("spatial")
threads = 1
# Alternatively, read the fgb version (much slower), or the parquet directly:
# pad = con.read_geo(fgb)
# pad = con.read_parquet(parquet)
# Ibis doesn't currently detect that this file is GeoParquet, so we need a SQL
# escape hatch to cast the WKB geometry column to a duckdb-native geometry.
# Lookup tables that decode the PAD-US integer domain codes into readable values
lookup = "https://huggingface.co/datasets/boettiger-lab/pad-us-3/resolve/main/parquet"
agency_name = con.read_parquet(f"{lookup}/pad-agency-name.parquet").select(manager_name_id="Code", manager_name="Dom")
agency_type = con.read_parquet(f"{lookup}/pad-agency-type.parquet").select(manager_type_id="Code", manager_type="Dom")
desig_type = con.read_parquet(f"{lookup}/pad-desgination-type.parquet").select(designation_type_id="Code", designation_type="Dom")  # ('desgination' typo is in the upstream filename)
public_access = con.read_parquet(f"{lookup}/pad-public-access.parquet").select(public_access_id="Code", public_access="Dom")
state_name = con.read_parquet(f"{lookup}/pad-state-name.parquet").select(state="Code", state_name="Dom")
iucn = con.read_parquet(f"{lookup}/pad-iucn.parquet").select(iucn_code="CODE", iucn_category="DOM")
con.raw_sql(f"CREATE OR REPLACE VIEW pad AS SELECT *, st_geomfromwkb(geometry) AS geom FROM read_parquet('{parquet}')")
pad = con.table("pad")
# -
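# Quick optional sanity check: the view should now expose a geometry-typed
# `geom` column alongside the raw WKB `geometry` column.
pad.schema()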
# Get the CRS.
# fiona isn't built with parquet support, so we read the CRS from the fgb copy;
# ideally duckdb's st_read_meta would recover this from the parquet directly.
with fiona.open(fgb) as src:
    crs = src.crs
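# An alternative sketch (untested here) that avoids the fgb entirely: GeoParquet
# stores its CRS in the parquet schema metadata under the 'geo' key, as PROJJSON.
# Assumes the file follows the GeoParquet spec and fsspec has an http handler:
# import json
# import fsspec
# import pyarrow.parquet as pq
# with fsspec.open(parquet, "rb") as f:
#     geo = json.loads(pq.read_schema(f).metadata[b"geo"])
# crs_projjson = geo["columns"][geo["primary_column"]].get("crs")  # None means OGC:CRS84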
# Now we can do the usual SQL-style queries to subset the data: filter to the
# Alaska and Hawaii fee and easement records, then decode the domain codes by
# joining on the lookup tables.
focal_columns = ["row_n", "FeatClass", "Mang_Name",
"Mang_Type", "Des_Tp", "Pub_Access",
"GAP_Sts", "IUCN_Cat", "Unit_Nm",
"State_Nm", "EsmtHldr", "Date_Est",
"SHAPE_Area", "geom"]
(
pad
.mutate(row_n=ibis.row_number())
.filter(_.FeatClass.isin(["Easement", "Fee"]))
.filter(_.State_Nm.isin(["AK", "HI"]))
.select(focal_columns)
.rename(geometry="geom")
.rename(manager_name_id = "Mang_Name",
manager_type_id = "Mang_Type",
designation_type_id = "Des_Tp",
public_access_id = "Pub_Access",
category = "FeatClass",
iucn_code = "IUCN_Cat",
gap_code = "GAP_Sts",
state = "State_Nm",
easement_holder = "EsmtHldr",
date_established = "Date_Est",
area_square_meters = "SHAPE_Area",
area_name = "Unit_Nm")
.left_join(agency_name, "manager_name_id")
.left_join(agency_type, "manager_type_id")
.left_join(desig_type, "designation_type_id")
.left_join(public_access, "public_access_id")
.left_join(state_name, "state")
.left_join(iucn, "iucn_code")
.select(~s.contains("_right"))
# .select(~s.contains("_id"))
    # If we keep the original GeoParquet WKB 'geometry' column, to_pandas() (or
    # execute) returns only a plain pandas DataFrame, and geopandas doesn't see
    # the metadata. If we instead cast to the duckdb-native geometry type,
    # to_pandas() returns a GeoDataFrame, but requires reading into RAM.
.to_pandas()
.set_crs(crs)
.to_parquet(vec_file)
)
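# Optional round-trip check: thanks to the duckdb-native geometry, the file we
# just wrote should read straight back into geopandas with the CRS attached.
gpd.read_parquet(vec_file).crs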
# +
import rasterio
from rasterstats import zonal_stats
import geopandas as gpd
import pandas as pd
from joblib import Parallel, delayed
def big_zonal_stats(vec_file, tif_file, stats, col_name, n_jobs, verbose=10, timeout=10000):
    """Compute zonal statistics for every polygon in vec_file against tif_file.

    Returns a GeoDataFrame with the requested statistic merged in as col_name.
    """
    # read the vector as geopandas, matching its CRS to the raster's
    with rasterio.open(tif_file) as src:
        raster_profile = src.profile
    gdf = gpd.read_parquet(vec_file).to_crs(raster_profile['crs'])
    # row_n is a global id; it may refer to polygons excluded upstream
    # gdf["row_id"] = gdf.index + 1

    # helper to run zonal_stats on a single row
    def get_stats(geom_slice, tif_file, stats):
        out = zonal_stats(geom_slice.geometry, tif_file, stats=stats)
        out[0]['row_n'] = geom_slice.row_n
        return out[0]

    # one delayed job per polygon
    jobs = [delayed(get_stats)(r, tif_file, stats) for r in gdf.itertuples()]
    # And here we go
    output = Parallel(n_jobs=n_jobs, timeout=timeout, verbose=verbose)(jobs)
    # reshape output: attach the computed statistic back onto the polygons
    df = (
        pd.DataFrame(output)
        .rename(columns={'mean': col_name})
        .merge(gdf, how='right', on='row_n')
    )
    return gpd.GeoDataFrame(df, geometry="geometry")
# -
# +
# %%time
tif_file = "/home/rstudio/boettiger-lab/us-pa-policy/hfp_2021_100m_v1-2_cog.tif"
df = big_zonal_stats(vec_file, tif_file, stats = ['mean'],
col_name = "human_impact", n_jobs=1, verbose=0)
df.to_parquet(vec_file)
# -
# %%time
tif_file = '/home/rstudio/source.coop/cboettig/mobi/species-richness-all/SpeciesRichness_All.tif'
big_zonal_stats(vec_file, tif_file, stats = ['mean'], col_name = "richness", n_jobs=threads, verbose=0).to_parquet(vec_file)
# +
# %%time
tif_file = '/home/rstudio/source.coop/cboettig/mobi/range-size-rarity-all/RSR_All.tif'
big_zonal_stats(vec_file, tif_file, stats=['mean'],
                col_name="rsr", n_jobs=threads, verbose=0).to_parquet(vec_file)
# +
# %%time
tif_file = '/home/rstudio/source.coop/vizzuality/lg-land-carbon-data/deforest_carbon_100m_cog.tif'
big_zonal_stats(vec_file, tif_file, stats=['mean'],
                col_name="deforest_carbon", n_jobs=threads, verbose=0).to_parquet(vec_file)
# +
# %%time
tif_file = '/home/rstudio/source.coop/vizzuality/lg-land-carbon-data/natcrop_bii_100m_cog.tif'
big_zonal_stats(vec_file, tif_file, stats=['mean'],
                col_name="biodiversity_intactness_loss", n_jobs=threads, verbose=0).to_parquet(vec_file)
# +
# %%time
tif_file = '/home/rstudio/source.coop/vizzuality/lg-land-carbon-data/natcrop_fii_100m_cog.tif'
big_zonal_stats(vec_file, tif_file, stats=['mean'],
                col_name="forest_integrity_loss", n_jobs=threads, verbose=0).to_parquet(vec_file)
# +
# %%time
tif_file = '/home/rstudio/source.coop/vizzuality/lg-land-carbon-data/natcrop_expansion_100m_cog.tif'
big_zonal_stats(vec_file, tif_file, stats=['mean'], col_name="crop_expansion", n_jobs=threads, verbose=0).to_parquet(vec_file)
# +
# %%time
tif_file = '/home/rstudio/source.coop/vizzuality/lg-land-carbon-data/natcrop_reduction_100m_cog.tif'
big_zonal_stats(vec_file, tif_file, stats=['mean'], col_name="crop_reduction", n_jobs=threads, verbose=0).to_parquet(vec_file)
# +
# %%time
tif_file = '/home/rstudio/source.coop/cboettig/carbon/cogs/irrecoverable_c_total_2018.tif'
big_zonal_stats(vec_file, tif_file, stats=['mean'], col_name="irrecoverable_carbon", n_jobs=threads, verbose=0).to_parquet(vec_file)
# +
# %%time
tif_file = '/home/rstudio/source.coop/cboettig/carbon/cogs/manageable_c_total_2018.tif'
big_zonal_stats(vec_file, tif_file, stats=['mean'], col_name="manageable_carbon", n_jobs=threads, verbose=0).to_parquet(vec_file)
# +
# %%time
tif_file = '/home/rstudio/minio/shared-biodiversity/redlist/cog/combined_rwr_2022.tif'
big_zonal_stats(vec_file, tif_file, stats=['mean'], col_name="all_species_rwr", n_jobs=threads, verbose=0).to_parquet(vec_file)
# +
# %%time
tif_file = '/home/rstudio/minio/shared-biodiversity/redlist/cog/combined_sr_2022.tif'
big_zonal_stats(vec_file, tif_file, stats=['mean'], col_name="all_species_richness", n_jobs=threads, verbose=0).to_parquet(vec_file)
# +
columns = '''
area_name,
manager_name,
manager_name_id,
manager_type,
manager_type_id,
manager_group,
designation_type,
designation_type_id,
public_access,
category,
iucn_code,
iucn_category,
gap_code,
state,
state_name,
easement_holder,
date_established,
area_square_meters,
geometry,
all_species_richness,
all_species_rwr,
manageable_carbon,
irrecoverable_carbon,
crop_reduction,
crop_expansion,
deforest_carbon,
richness,
rsr,
forest_integrity_loss,
biodiversity_intactness_loss
'''
items = columns.split(',')
# Remove empty strings and whitespace
items = [item.strip() for item in items if item.strip()]
items
# -
# Finally, subset to the tidy column set, rewriting the file in place,
# and take a quick look at the result.
ibis.read_parquet(vec_file).select(items).to_parquet(vec_file)

ibis.read_parquet(vec_file)