from __future__ import annotations

import argparse
import json
from pathlib import Path
import sys

import numpy as np
import pandas as pd

# Make the repository root (two directories above this file) importable so the
# `My.utils.*` imports below resolve when this script is run directly.
ROOT = Path(__file__).resolve().parents[2]
if str(ROOT) not in sys.path:
    sys.path.insert(0, str(ROOT))

from My.utils.dtw import compute_dtw_from_dataset
from My.utils.masks import build_semantic_mask, mask_to_neighbor_indices
from My.utils.patterns import learn_delay_patterns_from_dataset

# Column order of the merged feature array: six pollutant channels followed by
# six weather channels (d2m/t2m/sp/tp/u10/v10 — presumably ERA5 short names;
# confirm against the upstream merge script).
FEATURE_NAMES = [
    "PM25",
    "PM10",
    "SO2",
    "NO2",
    "O3",
    "CO",
    "d2m",
    "t2m",
    "sp",
    "tp",
    "u10",
    "v10",
]
# Mean Earth radius in kilometres, used by haversine_matrix.
EARTH_RADIUS_KM = 6371.0


def load_nodes_csv(path: Path) -> pd.DataFrame:
    """Load node metadata and normalise column naming.

    Tries a few common encodings (BOM'd UTF-8, plain UTF-8, GBK) and keeps
    the first four columns, renamed to ``node_id, name, lon, lat``.

    Args:
        path: CSV file whose first four columns are id, name, lon, lat.

    Returns:
        DataFrame with columns ``node_id`` (int), ``name``, ``lon``, ``lat``,
        sorted numerically by ``node_id`` with a fresh 0..n-1 index.

    Raises:
        RuntimeError: if the file cannot be read with any tried encoding.
        ValueError: if the file has fewer than four columns.
    """
    df = None
    for enc in ("utf-8-sig", "utf-8", "gbk"):
        try:
            df = pd.read_csv(path, encoding=enc)
            break
        except Exception:
            df = None
    if df is None:
        raise RuntimeError(f"Cannot read nodes csv: {path}")
    if df.shape[1] < 4:
        raise ValueError("nodes csv must have >=4 columns: id,name,lon,lat")
    sub = df.iloc[:, :4].copy()
    sub.columns = ["node_id", "name", "lon", "lat"]
    # Cast to int BEFORE sorting: if the id column was parsed as strings,
    # sorting first would order ids lexicographically ("10" < "2").
    sub["node_id"] = sub["node_id"].astype(int)
    sub = sub.sort_values("node_id").reset_index(drop=True)
    return sub


def haversine_matrix(lat: np.ndarray, lon: np.ndarray) -> np.ndarray:
    """Pairwise great-circle distances in kilometres via the haversine formula.

    Args:
        lat: latitudes in degrees, shape (n,).
        lon: longitudes in degrees, shape (n,).

    Returns:
        Symmetric (n, n) matrix of distances in km (zero diagonal).
    """
    phi = np.deg2rad(lat)[:, None]
    lam = np.deg2rad(lon)[:, None]
    half_dphi = (phi - phi.T) / 2.0
    half_dlam = (lam - lam.T) / 2.0
    # Haversine term; clip guards against tiny negative/overshoot values
    # from floating-point error before the arcsin.
    hav = np.sin(half_dphi) ** 2 + np.cos(phi) * np.cos(phi.T) * np.sin(half_dlam) ** 2
    central_angle = 2.0 * np.arcsin(np.sqrt(np.clip(hav, 0.0, 1.0)))
    return EARTH_RADIUS_KM * central_angle


def build_neighbors(dist: np.ndarray, top_k: int) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
    """Return adjacency mask and neighbour lists from a distance matrix.

    Args:
        dist: (n, n) pairwise distance matrix.
        top_k: neighbours to keep per node; must satisfy 0 < top_k < n.

    Returns:
        mask: (n, n) bool adjacency — symmetric, diagonal True.
        indices: (n, top_k) int64 nearest-neighbour indices per node.
        valid: (n, top_k) bool validity flags (always all True here; kept
            for interface parity with the semantic-neighbour arrays).

    Raises:
        ValueError: if top_k is out of range.
    """
    n = dist.shape[0]
    if top_k <= 0 or top_k >= n:
        raise ValueError("top_k must satisfy 0 < top_k < num_nodes")

    # Exclude self explicitly instead of assuming the diagonal sorts to
    # position 0: with duplicate coordinates another node ties at distance 0
    # and "drop sorted index 0" could drop a real neighbour instead of self.
    masked = np.array(dist, dtype=np.float64, copy=True)
    np.fill_diagonal(masked, np.inf)
    neighbours = np.argsort(masked, axis=1)[:, :top_k]

    mask = np.zeros((n, n), dtype=bool)
    np.fill_diagonal(mask, True)
    rows = np.repeat(np.arange(n), top_k)
    mask[rows, neighbours.ravel()] = True
    mask |= mask.T  # symmetrise: an edge in either direction keeps both

    # neighbours always has exactly top_k columns, so no -1 padding is needed
    # and every slot is valid (the original padding loop was dead code).
    indices = neighbours.astype(np.int64)
    valid = np.ones((n, top_k), dtype=bool)
    return mask, indices, valid


def convert_dataset(root: Path, start: str, freq: str) -> tuple[int, int]:
    """Convert the raw merged npz into the files downstream code expects.

    Writes ``data_merged.npz`` (data + non-finite mask), ``times.csv``,
    ``nodes.csv`` and ``summary_merged.json`` under *root*.

    Args:
        root: directory containing the raw Beijing files.
        start: start timestamp, interpreted as Asia/Shanghai wall time.
        freq: pandas frequency string; must be fixed-width (e.g. hourly).

    Returns:
        (time_steps, num_nodes) of the converted data array.

    Raises:
        FileNotFoundError: if a required raw file is missing.
        KeyError: if the npz lacks the 'data' array.
        ValueError: if *freq* is not a fixed-width frequency.
    """
    data_npz = root / "beijing_air_weather_merged.npz"
    nodes_csv = root / "beijing_nodes_geo.csv"
    if not data_npz.exists():
        raise FileNotFoundError(data_npz)
    if not nodes_csv.exists():
        raise FileNotFoundError(nodes_csv)

    # Use a context manager so the npz file handle is closed promptly.
    with np.load(data_npz) as arrays:
        if "data" not in arrays:
            raise KeyError("beijing_air_weather_merged.npz missing 'data' array")
        data = arrays["data"].astype(np.float32)
    time_steps, num_nodes, _ = data.shape

    # mask marks missing/invalid entries (NaN or +-inf).
    mask = ~np.isfinite(data)
    np.savez(root / "data_merged.npz", x=data, mask=mask)

    start_ts = pd.Timestamp(start, tz="Asia/Shanghai")
    times = pd.date_range(start=start_ts, periods=time_steps, freq=freq)
    # tz_localize(None) drops the timezone while keeping the local wall-clock
    # time. The previous tz_convert(None) silently converted to UTC (-8 h),
    # so times.csv no longer started at the requested timestamp.
    times_df = pd.DataFrame({"datetime": times.tz_localize(None).astype(str)})
    times_df.to_csv(root / "times.csv", index=False, encoding="utf-8-sig")

    nodes_df = load_nodes_csv(nodes_csv)
    nodes_df.to_csv(root / "nodes.csv", index=False, encoding="utf-8-sig")

    offset = pd.tseries.frequencies.to_offset(freq)
    # Tick (fixed-width) offsets expose .nanos; non-fixed frequencies raise
    # ValueError here, matching the original behaviour without touching the
    # deprecated .delta attribute.
    freq_td = pd.to_timedelta(offset.nanos, unit="ns")
    steps_per_day = int(pd.Timedelta(days=1) / freq_td)
    summary = {
        "feature_names": FEATURE_NAMES,
        "steps_per_day": steps_per_day,
        "freq_minutes": int(freq_td.total_seconds() / 60),
        "note": "generated from beijing_air_weather_merged.npz",
    }
    (root / "summary_merged.json").write_text(
        json.dumps(summary, ensure_ascii=False, indent=2),
        encoding="utf-8",
    )

    print("Converted dataset files:")
    print(" -", root / "data_merged.npz")
    print(" -", root / "times.csv")
    print(" -", root / "nodes.csv")
    print(" -", root / "summary_merged.json")
    return time_steps, num_nodes


def build_spatial_artifacts(
    root: Path,
    artifact_dir: Path,
    *,
    top_k: int,
    feature_idx: int,
    dtw_downsample: int,
    dtw_radius_ratio: float,
    pattern_window: int,
    num_patterns: int,
    pattern_stride: int,
    pattern_normalize: bool,
) -> None:
    """Build geographic/semantic neighbour artifacts for the converted data.

    Produces, under *artifact_dir*: geographic mask + neighbour lists (from
    haversine distances), a DTW matrix, semantic mask + neighbour lists
    (from the DTW matrix), and learned delay-pattern keys.
    """
    node_table = pd.read_csv(root / "nodes.csv", encoding="utf-8-sig")
    distances = haversine_matrix(
        node_table["lat"].to_numpy(dtype=np.float64),
        node_table["lon"].to_numpy(dtype=np.float64),
    )
    geo_mask, geo_indices, geo_valid = build_neighbors(distances, top_k=top_k)

    artifact_dir.mkdir(parents=True, exist_ok=True)
    out = {
        "geo_mask": artifact_dir / "geo_mask.npy",
        "geo_neighbors": artifact_dir / "geo_neighbors.npz",
        "dtw": artifact_dir / "dtw_matrix.npy",
        "sem_mask": artifact_dir / "sem_mask.npy",
        "sem_neighbors": artifact_dir / "sem_neighbors.npz",
        "patterns": artifact_dir / "pattern_keys.npy",
    }

    np.save(out["geo_mask"], geo_mask.astype(np.float32))
    np.savez(out["geo_neighbors"], indices=geo_indices, valid=geo_valid)

    # DTW similarity on one feature channel; recomputed every run (no reuse).
    dtw_matrix, _ = compute_dtw_from_dataset(
        root,
        feature_idx=feature_idx,
        radius=None,
        radius_ratio=dtw_radius_ratio,
        downsample=dtw_downsample,
        cache_path=out["dtw"],
        reuse_if_exists=False,
        verbose=True,
    )

    # Semantic neighbours: smallest DTW distances win (descending=False).
    sem_mask = build_semantic_mask(
        dtw_matrix,
        topk=top_k,
        include_self=True,
    )
    np.save(out["sem_mask"], sem_mask.astype(np.float32))
    sem_indices, sem_valid = mask_to_neighbor_indices(
        sem_mask,
        max_neighbors=top_k,
        include_self=True,
        sort_matrix=dtw_matrix,
        descending=False,
    )
    np.savez(out["sem_neighbors"], indices=sem_indices, valid=sem_valid)

    learn_delay_patterns_from_dataset(
        root,
        window=pattern_window,
        num_patterns=num_patterns,
        feature_idx=feature_idx,
        stride=pattern_stride,
        normalize=pattern_normalize,
        use_train_split=True,
        cache_path=out["patterns"],
    )

    print("Generated spatial artifacts in", artifact_dir)
    for key in ("geo_mask", "geo_neighbors", "dtw", "sem_mask", "sem_neighbors", "patterns"):
        print(" -", out[key])


def parse_args() -> argparse.Namespace:
    """Parse the command-line options for dataset preparation."""
    p = argparse.ArgumentParser(
        description="Prepare Beijing dataset and spatial artifacts."
    )
    p.add_argument(
        "--data-dir",
        type=Path,
        default=Path("dataset/data_beijing"),
        help="Root directory containing raw Beijing files",
    )
    p.add_argument(
        "--artifact-dir",
        type=Path,
        default=Path("artifacts/data_beijing"),
        help="Output directory for spatial artifacts",
    )
    p.add_argument(
        "--start",
        type=str,
        default="2022-01-01 00:00",
        help="Start timestamp for generated times.csv (Asia/Shanghai)",
    )
    p.add_argument(
        "--freq",
        type=str,
        default="H",
        help="Pandas frequency string for timestamp generation",
    )
    p.add_argument(
        "--top-k",
        type=int,
        default=8,
        help="Number of nearest neighbours per node",
    )
    p.add_argument(
        "--feature-idx",
        type=int,
        default=0,
        help="Feature index for DTW and pattern learning",
    )
    p.add_argument(
        "--dtw-downsample",
        type=int,
        default=6,
        help="Downsample factor when computing DTW matrix",
    )
    p.add_argument(
        "--dtw-radius-ratio",
        type=float,
        default=0.1,
        help="Sakoe-Chiba radius ratio for DTW",
    )
    p.add_argument(
        "--pattern-window",
        type=int,
        default=6,
        help="Sliding window size for delay patterns",
    )
    p.add_argument(
        "--num-patterns",
        type=int,
        default=16,
        help="Number of delay patterns to learn",
    )
    p.add_argument(
        "--pattern-stride",
        type=int,
        default=1,
        help="Stride when extracting sliding windows",
    )
    p.add_argument(
        "--no-pattern-normalize",
        action="store_true",
        help="Disable per-window normalization when learning patterns",
    )
    return p.parse_args()


def main() -> None:
    """Entry point: convert the raw dataset, then build spatial artifacts."""
    args = parse_args()
    data_root = args.data_dir.expanduser()
    out_dir = args.artifact_dir.expanduser()

    steps, nodes = convert_dataset(data_root, args.start, args.freq)

    # Clamp user-supplied knobs to sane minima before building artifacts.
    spatial_kwargs = dict(
        top_k=args.top_k,
        feature_idx=args.feature_idx,
        dtw_downsample=max(1, args.dtw_downsample),
        dtw_radius_ratio=max(0.0, args.dtw_radius_ratio),
        pattern_window=args.pattern_window,
        num_patterns=args.num_patterns,
        pattern_stride=max(1, args.pattern_stride),
        pattern_normalize=not args.no_pattern_normalize,
    )
    build_spatial_artifacts(data_root, out_dir, **spatial_kwargs)

    print(f"Finished preparation. steps={steps}, nodes={nodes}, top_k={args.top_k}")


if __name__ == "__main__":
    main()
