import json
import os
from typing import Dict, List, Tuple

import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split


def parse_dates_to_components(dates_series: pd.Series) -> pd.DataFrame:
    """Decompose timestamp strings into int32 Year/Month/Day/Hour columns.

    Unparseable entries are coerced to NaT (``errors="coerce"``); callers are
    expected to have dropped such rows already, since the int32 cast below
    cannot represent missing values.
    """
    parsed = pd.to_datetime(dates_series, errors="coerce")
    components = {
        "Year": parsed.dt.year,
        "Month": parsed.dt.month,
        "Day": parsed.dt.day,
        "Hour": parsed.dt.hour,
    }
    return pd.DataFrame({name: values.astype(np.int32) for name, values in components.items()})


def build_features(df: pd.DataFrame) -> Tuple[pd.DataFrame, List[str], List[str]]:
    """Build the feature matrix from the raw frame.

    Returns:
        features: numeric date/coordinate columns plus one-hot encodings of
            DayOfWeek and PdDistrict.
        feature_columns: every feature name, in column order.
        numeric_cols: the subset of columns eligible for z-score normalization.

    Note: the frame's index is reset up front so that every column-wise
    ``pd.concat`` aligns by position. Previously only ``df[["X", "Y"]]`` was
    reset while the date components and one-hot frames kept ``df``'s original
    index, so a caller passing a frame with a non-default index (e.g. filtered
    rows without ``reset_index``) got silently misaligned rows full of NaN.
    """
    df = df.reset_index(drop=True)

    # Numeric components from Dates plus the raw coordinates.
    date_components = parse_dates_to_components(df["Dates"])  # Year, Month, Day, Hour
    numeric_cols = ["Year", "Month", "Day", "Hour", "X", "Y"]
    numeric_df = pd.concat([date_components, df[["X", "Y"]]], axis=1)

    # One-hot encodings for the categorical columns.
    day_ohe = pd.get_dummies(df["DayOfWeek"], prefix="dow")
    dist_ohe = pd.get_dummies(df["PdDistrict"], prefix="pd")

    features = pd.concat([numeric_df, day_ohe, dist_ohe], axis=1)
    feature_columns = list(features.columns)
    return features, feature_columns, numeric_cols


def compute_norm_stats(train_features: pd.DataFrame, numeric_cols: List[str]) -> Tuple[Dict[str, float], Dict[str, float]]:
    """Return per-column (means, stds) dicts for the numeric training columns.

    Uses population std (ddof=0); columns with zero spread get std 1.0 so the
    subsequent normalization never divides by zero.
    """
    subset = train_features[numeric_cols]
    means = subset.mean().to_dict()
    raw_stds = subset.std(ddof=0)
    safe_stds = raw_stds.mask(raw_stds == 0, 1.0)
    return means, safe_stds.to_dict()


def apply_normalization(df: pd.DataFrame, numeric_cols: List[str], means: Dict[str, float], stds: Dict[str, float]) -> pd.DataFrame:
    """Return a copy of ``df`` with each listed column z-scored using the
    supplied per-column mean/std (stats are not recomputed here)."""
    normalized = df.copy()
    for column in numeric_cols:
        normalized[column] = normalized[column].sub(means[column]).div(stds[column])
    return normalized


def preprocess(
    input_path: str,
    out_dir: str,
    test_size: float = 0.2,
    seed: int = 42,
    normalize: bool = True,
) -> None:
    """End-to-end preprocessing: raw CSV -> train/valid CSVs plus JSON metadata.

    Reads the raw CSV at ``input_path``; drops rows with unparseable dates,
    non-finite X/Y coordinates, or missing Category/DayOfWeek/PdDistrict;
    builds numeric + one-hot features; label-encodes Category; performs a
    stratified train/valid split; optionally z-scores the numeric columns
    using statistics computed from the training split only; and writes
    ``train.csv``, ``valid.csv``, ``label_mapping.json`` and
    ``feature_columns.json`` into ``out_dir``.

    Args:
        input_path: Path to the raw CSV. Must contain Dates, X, Y, Category,
            DayOfWeek and PdDistrict columns.
        out_dir: Output directory; created if missing.
        test_size: Fraction of rows held out for validation.
        seed: Random seed for the split, for reproducibility.
        normalize: Whether to z-score the numeric feature columns.
    """
    os.makedirs(out_dir, exist_ok=True)

    # Load raw CSV
    df = pd.read_csv(input_path)

    # Coerce numerics/dates and skip abnormal rows (NaN/Inf/NaT)
    df["X"] = pd.to_numeric(df["X"], errors="coerce")
    df["Y"] = pd.to_numeric(df["Y"], errors="coerce")
    parsed_dates = pd.to_datetime(df["Dates"], errors="coerce")

    # Keep only rows where every required field is present and well-formed.
    valid_mask = (
        parsed_dates.notna()
        & np.isfinite(df["X"].values)
        & np.isfinite(df["Y"].values)
        & df["Category"].notna()
        & df["DayOfWeek"].notna()
        & df["PdDistrict"].notna()
    )
    # reset_index so downstream positional concats in build_features align.
    df = df.loc[valid_mask].reset_index(drop=True)

    # Build features
    features, feature_columns, numeric_cols = build_features(df)

    # Labels mapping: sorted category names -> contiguous integer ids,
    # so the mapping is deterministic across runs.
    categories = sorted(df["Category"].unique())
    label_mapping = {cat: idx for idx, cat in enumerate(categories)}
    labels = df["Category"].map(label_mapping).astype(np.int64)

    # Stratified split (preserves per-class proportions in both splits)
    indices = np.arange(len(df))
    train_idx, valid_idx = train_test_split(
        indices, test_size=test_size, random_state=seed, stratify=labels
    )

    train_features = features.iloc[train_idx].reset_index(drop=True)
    valid_features = features.iloc[valid_idx].reset_index(drop=True)
    y_train = labels.iloc[train_idx].reset_index(drop=True)
    y_valid = labels.iloc[valid_idx].reset_index(drop=True)

    norm_means: Dict[str, float] = {}
    norm_stds: Dict[str, float] = {}
    if normalize:
        # Stats come from the training split only, then are applied to both
        # splits — avoids leaking validation data into the normalization.
        norm_means, norm_stds = compute_norm_stats(train_features, numeric_cols)
        train_features = apply_normalization(train_features, numeric_cols, norm_means, norm_stds)
        valid_features = apply_normalization(valid_features, numeric_cols, norm_means, norm_stds)

    # Persist processed CSVs (features + label)
    train_out = train_features.copy()
    train_out["label"] = y_train
    valid_out = valid_features.copy()
    valid_out["label"] = y_valid

    train_out.to_csv(os.path.join(out_dir, "train.csv"), index=False)
    valid_out.to_csv(os.path.join(out_dir, "valid.csv"), index=False)

    # Persist metadata: label mapping for decoding predictions later.
    with open(os.path.join(out_dir, "label_mapping.json"), "w", encoding="utf-8") as f:
        json.dump(label_mapping, f, ensure_ascii=False, indent=2)

    # Feature metadata: column order plus the normalization stats needed to
    # apply the identical transform at inference time.
    feature_meta = {
        "feature_columns": feature_columns,
        "numeric_columns": numeric_cols,
        "normalization": {
            "enabled": normalize,
            "mean": norm_means,
            "std": norm_stds,
        },
    }
    with open(os.path.join(out_dir, "feature_columns.json"), "w", encoding="utf-8") as f:
        json.dump(feature_meta, f, ensure_ascii=False, indent=2)

    print(
        f"Processed data written to '{out_dir}'. Train size: {len(train_out)}, Valid size: {len(valid_out)}, Features: {len(feature_columns)}, Classes: {len(label_mapping)}"
    )


def main() -> None:
    """Script entry point: run preprocessing with the fixed configuration below."""
    config = {
        "input_path": "./sf-crime_open/data.csv",
        "out_dir": "./processed",
        "test_size": 0.2,
        "seed": 42,
        "normalize": True,
    }
    preprocess(**config)


if __name__ == "__main__":
    main()


