french_temperatures_420M / normalization.py
La-matrice's picture
Upload normalization.py
e57783c verified
raw
history blame
1.93 kB
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
# Per-column (min, max) value ranges used for min-max scaling to [0, 1].
_COLUMN_RANGES = {
    'LAT': (-66.817333, 51.055833),
    'LON': (-178.116667, 171.358333),
    'ALTI': (0.0, 3845.0),
    'AAAAMMJJHH': (1777010107, 2024030803),
    'ANNEE': (1777, 2024),
    'MOIS': (1, 12),
    'JOUR': (1, 31),
    'HEURE': (0, 23),
}
# Expand into the {'min_val': ..., 'max_val': ...} shape the pipeline expects.
norm_params = {
    name: {'min_val': lo, 'max_val': hi}
    for name, (lo, hi) in _COLUMN_RANGES.items()
}
def normalize_column(column, min_val, max_val):
    """Min-max scale *column* so that min_val maps to 0.0 and max_val to 1.0.

    The series is cast to float64 up front so integer-typed columns
    divide correctly instead of truncating.
    """
    span = max_val - min_val
    shifted = column.astype('float64') - min_val
    return shifted / span
# Stream the dataset through normalization one row group at a time so the
# whole file never has to fit in memory.
dataset_path = 'C:/Users/View/Desktop/oetem/dataset/dataset.parquet'
output_path = 'C:/Users/View/Desktop/oetem/dataset/dataset_normalized.parquet'

parquet_file = pq.ParquetFile(dataset_path)

# NOTE(review): ' T' (with a leading space) is read alongside the normalized
# columns — presumably the target temperature column whose name really does
# carry a leading space in the schema; verify against the source dataset.
columns_to_read = list(norm_params.keys()) + [' T']

writer = None
try:
    for i in range(parquet_file.num_row_groups):
        chunk = parquet_file.read_row_group(i, columns=columns_to_read).to_pandas()

        # Scale each configured column into [0, 1]; ' T' passes through as-is.
        for col, params in norm_params.items():
            chunk[col] = normalize_column(chunk[col], min_val=params['min_val'], max_val=params['max_val'])

        # Drop the pandas index so it is not written as an extra column.
        table = pa.Table.from_pandas(chunk, preserve_index=False)

        # Create the writer lazily from the first chunk's schema so the output
        # matches whatever the pandas -> Arrow conversion produced.
        if writer is None:
            writer = pq.ParquetWriter(output_path, table.schema)
        writer.write_table(table)
finally:
    # Always finalize the parquet footer, even if a row group fails mid-run;
    # otherwise the output file is left unreadable.
    if writer is not None:
        writer.close()