import numpy as np
import pandas as pd
import joblib


def create_features(
    data,
    target_particle,
    lag_days=7,
    sma_days=7,
):
""" |
|
Creates lagged features, SMA features, last year's particle data (NO2 and O3) for specific days, |
|
sine and cosine transformations for 'weekday' and 'month', and target variables for the specified |
|
particle ('O3' or 'NO2') for the next 'days_ahead' days. Scales features and targets without |
|
disregarding outliers and saves the scalers for inverse scaling. Splits the data into train, |
|
validation, and test sets using the most recent dates. Prints the number of rows with missing |
|
values dropped from the dataset. |
|
|
|
Parameters: |
|
- data (pd.DataFrame): The input time-series dataset. |
|
- target_particle (str): The target particle ('O3' or 'NO2') for which targets are created. |
|
- lag_days (int): Number of lag days to create features for (default 7). |
|
- sma_days (int): Window size for Simple Moving Average (default 7). |
|
- days_ahead (int): Number of days ahead to create target variables for (default 3). |
|
|
|
Returns: |
|
- X_train_scaled (pd.DataFrame): Scaled training features. |
|
- y_train_scaled (pd.DataFrame): Scaled training targets. |
|
- X_val_scaled (pd.DataFrame): Scaled validation features (365 days). |
|
- y_val_scaled (pd.DataFrame): Scaled validation targets (365 days). |
|
- X_test_scaled (pd.DataFrame): Scaled test features (365 days). |
|
- y_test_scaled (pd.DataFrame): Scaled test targets (365 days). |
|
""" |
|
    import warnings

    # Silence the pandas/sklearn warnings emitted while adding many columns.
    warnings.filterwarnings("ignore")

    # Validate the target before using it to assemble the feature list.
    if target_particle not in ["O3", "NO2"]:
        raise ValueError("target_particle must be 'O3' or 'NO2'")

    lag_features = [
        "NO2",
        "O3",
        "wind_speed",
        "mean_temp",
        "global_radiation",
        "minimum_visibility",
        "humidity",
    ]
    if target_particle == "NO2":
        # 'percipitation' is kept as-is to match the dataset's column name.
        lag_features = lag_features + ["percipitation", "pressure"]

    data = data.copy()
    data["date"] = pd.to_datetime(data["date"])
    data = data.sort_values("date").reset_index(drop=True)

    # Derive calendar fields if missing (or if 'weekday' is stored as strings).
    if "weekday" not in data.columns or data["weekday"].dtype == object:
        data["weekday"] = data["date"].dt.weekday
    if "month" not in data.columns:
        data["month"] = data["date"].dt.month

    # Cyclical encodings keep adjacent periods close (Sunday/Monday, Dec/Jan).
    data["weekday_sin"] = np.sin(2 * np.pi * data["weekday"] / 7)
    data["weekday_cos"] = np.cos(2 * np.pi * data["weekday"] / 7)
    data["month_sin"] = np.sin(2 * np.pi * (data["month"] - 1) / 12)
    data["month_cos"] = np.cos(2 * np.pi * (data["month"] - 1) / 12)

    # Lagged copies of each feature for the previous `lag_days` days.
    for feature in lag_features:
        for lag in range(1, lag_days + 1):
            data[f"{feature}_lag_{lag}"] = data[feature].shift(lag)

    # Trailing Simple Moving Average over the last `sma_days` days.
    for feature in lag_features:
        data[f"{feature}_sma_{sma_days}"] = (
            data[feature].rolling(window=sma_days).mean()
        )

data["O3_last_year"] = 0 |
|
data["NO2_last_year"] = 0 |
|
|
|
|
|
for i in range(1, lag_days + 1): |
|
data[f"O3_last_year_{i}_days_before"] = 0 |
|
data[f"NO2_last_year_{i}_days_before"] = 0 |
|
|
|
|
|
data["O3_last_year_3_days_after"] = 0 |
|
data["NO2_last_year_3_days_after"] = 0 |
|
|
|
|
|
    # Drop rows made incomplete by the lag/SMA windows and report the count.
    rows_before = data.shape[0]
    data = data.dropna().reset_index(drop=True)
    rows_after = data.shape[0]
    rows_dropped = rows_before - rows_after
    print(f"Number of rows with missing values dropped: {rows_dropped}")

    # Exclude 'date' and the raw calendar columns; their cyclical encodings
    # carry the seasonal signal instead.
    exclude_cols = ["date", "weekday", "month"]
    feature_cols = [col for col in data.columns if col not in exclude_cols]

    x = data[feature_cols]

    # Load the feature scaler fitted during training and apply it. `transform`
    # (rather than `fit_transform`) preserves the training-time scaling
    # parameters instead of refitting them on this data.
    feature_scaler = joblib.load(f"scalers/feature_scaler_{target_particle}.joblib")

    X_scaled = feature_scaler.transform(x)
    X_scaled = pd.DataFrame(X_scaled, columns=feature_cols, index=x.index)

    return X_scaled
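

# For context, a minimal sketch of how the scaler loaded above might have been
# produced at training time (assumed workflow; `X_train` and the dump path are
# hypothetical, not part of this module):
#
#     from sklearn.preprocessing import StandardScaler
#     scaler = StandardScaler()
#     X_train_scaled = scaler.fit_transform(X_train)
#     joblib.dump(scaler, "scalers/feature_scaler_NO2.joblib")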
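

if __name__ == "__main__":
    # Usage sketch: the CSV path and columns are assumptions, not part of the
    # original module. The file must contain a 'date' column plus the pollutant
    # and weather columns named in `lag_features` above, and a previously
    # saved scaler must exist at scalers/feature_scaler_NO2.joblib.
    df = pd.read_csv("data/daily_air_quality.csv")
    X_scaled_demo = create_features(df, target_particle="NO2")
    print(X_scaled_demo.shape)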