# SPDX-FileCopyrightText: 2024 German Aerospace Center (DLR.e.V.)
# SPDX-FileContributor: Ferdinand Rewicki <ferdinand.rewicki@dlr.de>
#
# SPDX-License-Identifier: Apache-2.0

"""Main experiment."""

from collections.abc import Generator

import aeon.distances
import mlflow
import numpy as np
from aeon.clustering import BaseClusterer, TimeSeriesKMeans
from hydra.conf import HydraConf, RunDir, SweepDir
from hydra_zen import ZenStore, make_custom_builds_fn, zen
from omegaconf import DictConfig
from tslearn.clustering import TimeSeriesKMeans as TSLearnTimeSeriesKMeans
from tslearn.utils import to_time_series_dataset

from saai.data.synth_ics_generators import generate_synthetic_ics_anomaly_dataset
from saai.features import DenoisedFeatures
from saai.methods import ARI, FMI, SAAI, SKLearnSSC, TSLearnSSC, TSXMeans
from saai.utils import MLFlowHelper, seed_rngs

# Config-building helpers: populate_full_signature=True records every target
# parameter in the generated config so each is overridable from the CLI.
builds = make_custom_builds_fn(populate_full_signature=True)
store = ZenStore(overwrite_ok=True)

# Global experiment parameters, registered under the "gp" config group and
# referenced elsewhere via interpolation (e.g. "${gp.seed}").
global_params = {
    "seed": 42,
    "sensor": "ics",
    "experiment_name": "SAAI",
    "tags": {"run_id": "static", "try": 1},
    "k": [2, 3, 4, 5],  # candidate cluster counts evaluated per trial
}
# NOTE(fix): the original call passed `pakage="_global_"` — a typo of
# `package` that was never a valid store keyword. Repackaging to "_global_"
# would also break every `${gp.*}` interpolation, so the kwarg is dropped.
global_store = store(global_params, name="global_params", group="gp")

DataGeneratorConf = builds(generate_synthetic_ics_anomaly_dataset)

# Keyword arguments shared by both dataset variants below.
_common_generator_kwargs = {
    "n_timeseries": 1,
    "dim": 2,
    "n_days": 90,
    "n_anomalies": 10,
    "r_sync": 0.9,
    "granularity": None,
    "lags": None,
    "add_red_noise": True,
}
# Deterministic variant: fixed set of anomaly classes, no randomization.
static_conf = DataGeneratorConf(
    anomaly_types=[1, 2, 3, 4],
    randomize=False,
    **_common_generator_kwargs,
)
# Randomized variant: anomaly classes drawn at generation time.
random_conf = DataGeneratorConf(
    anomaly_types=None,
    n_classes=4,
    randomize=True,
    **_common_generator_kwargs,
)

# Register both variants under the "data_generator" config group.
data_generator_store = store(group="data_generator")
data_generator_store(static_conf, name="static")
data_generator_store(random_conf, name="random")

# Feature transforms (config group "transform"); "denoised" wraps
# DenoisedFeatures with window size w=4.
transform_store = store(group="transform")
transform_store(DenoisedFeatures, w=4, name="denoised")

# Clustering algorithms (config group "clusterer"). Both entries are stored
# partially configured (zen_partial=True) so `n_clusters` can be supplied
# later; the random state resolves from the global params via interpolation.
clusterer_store = store(group="clusterer")

_shared_kmeans_kwargs = {
    "max_iter": 10,
    "random_state": "${gp.seed}",
    "zen_partial": True,
}
clusterer_store(
    TSLearnTimeSeriesKMeans,
    metric="dtw",
    name="TSLearnKMeans",
    **_shared_kmeans_kwargs,
)
clusterer_store(
    TimeSeriesKMeans,
    distance="dtw",
    n_init=1,
    init_algorithm="kmeans++",
    averaging_method="ba",
    name="AeonKMeans",
    **_shared_kmeans_kwargs,
)


@store(
    name="main",
    hydra_defaults=[
        "_self_",
        {"gp": "global_params"},
        {"data_generator": "static"},
        {"clusterer": "TSLearnKMeans"},
        {"transform": "denoised"},
        {"override hydra/launcher": "submitit_local"},
    ],
)
def main(gp, data_generator: Generator, clusterer: BaseClusterer, transform):
    """Perform the main experiment.

    For every generated dataset, cluster it with each candidate k in
    ``gp["k"]``, score every k with all scoring functions, and count per
    method how often the best-scoring k matches the true number of classes.
    Per-trial metrics go to nested MLflow runs; aggregates (win counts,
    accuracies, mean correlation) are logged to the parent run.

    Parameters
    ----------
    gp
        Global parameter mapping; must provide ``"seed"`` and the candidate
        cluster counts ``"k"``.
    data_generator
        Generator yielding ``(X, y, dim_lookup, idx_lookup, r, ds)`` tuples,
        one per trial.
    clusterer
        Partially configured clusterer factory, called as
        ``clusterer(n_clusters=k)``.
    transform
        Feature transform applied to each dataset before clustering.

    Raises
    ------
    RuntimeError
        If ``data_generator`` yields no datasets (nothing to evaluate).
    """
    seed_rngs(gp["seed"])
    scoring_functions = [ARI, FMI, SAAI, SKLearnSSC, TSLearnSSC]
    # NOTE: alternative k-finders (GapStatistic, VarianceReduction,
    # EntropyReduction) are currently disabled.
    kfinders = [TSXMeans]
    wins = {m: 0 for m in scoring_functions + kfinders}
    _corr_avg = 0
    total_runs = 0
    parent_run = None  # set inside the loop; guarded against empty generator
    # Iterate over trials (one generated time-series dataset per iteration).
    for X, y, dim_lookup, idx_lookup, r, ds in data_generator:
        with mlflow.start_run(nested=True) as run:
            X = to_time_series_dataset(X)
            X = transform.transform(X)
            X = to_time_series_dataset(X)
            min_k = min(gp["k"])
            max_k = max(gp["k"])

            scores = {sf: np.zeros((max_k - min_k + 1,)) for sf in scoring_functions}
            for k in range(min_k, max_k + 1):
                _clusterer = clusterer(n_clusters=k)
                X_ = X
                # aeon's KMeans cannot cope with the NaN padding that
                # to_time_series_dataset introduces for unequal lengths.
                if isinstance(_clusterer, aeon.clustering.TimeSeriesKMeans):
                    X_ = np.nan_to_num(X)
                _clusterer.fit(X_)
                y_pred = _clusterer.predict(X_)
                for sf in scoring_functions:
                    scores[sf][k - min_k] = sf.scoring_function(
                        X,
                        y_pred,
                        k,
                        centroids=_clusterer.cluster_centers_.squeeze(),
                        y_true=y,
                        idx_lookup=idx_lookup,
                        dim_lookup=dim_lookup,
                    )
                    mlflow.log_metric(sf.NAME, scores[sf][k - min_k], step=k)

            # A method "wins" a trial when its best-scoring k equals the
            # true number of classes in the ground truth labels.
            for sf in scoring_functions:
                k_hat = np.argmax(scores[sf]) + min_k
                wins[sf] += k_hat == len(np.unique(y))

            for kf in kfinders:
                k_hat = kf.find_k(
                    X,
                    max(gp["k"]),
                )
                wins[kf] += k_hat == len(np.unique(y))
            mlflow.log_metric("corr_avg", r)
            _corr_avg += r
            # Mirror this trial's generator settings onto the parent run.
            # NOTE(review): assumes a parent run exists (nested runs) —
            # mlflow.get_parent_run returns None otherwise.
            parent_run = mlflow.get_parent_run(run.info.run_id)
            parent_params = parent_run.data.params
            parent_params["data_generator.r_sync"] = ds.r_sync[total_runs]
            parent_params["data_generator.n_classes"] = len(
                ds.anomaly_types[total_runs]
            )
            mlflow.log_params(parent_params)
            mlflow.set_tags(parent_run.data.tags)
            total_runs += 1

    if total_runs == 0 or parent_run is None:
        # Without this guard the aggregation below would raise a bare
        # NameError/ZeroDivisionError on an empty generator.
        raise RuntimeError("data_generator yielded no datasets to evaluate")

    for m in scoring_functions + kfinders:
        mlflow.log_metric(f"{m.NAME}_wins", wins[m], run_id=parent_run.info.run_id)
        mlflow.log_metric(
            f"{m.NAME}_acc", wins[m] / total_runs, run_id=parent_run.info.run_id
        )
    mlflow.log_metric("corr_avg", _corr_avg / total_runs, run_id=parent_run.info.run_id)


def pre_call(cfg: DictConfig) -> None:
    """Initialize the experiment before Hydra invokes ``main``.

    Derives ``n_classes`` from the configured anomaly types when it is not
    set explicitly, then sets up the MLflow experiment and logs the resolved
    configuration.
    """
    generator_cfg = cfg.data_generator
    if generator_cfg.n_classes is None:
        generator_cfg.n_classes = len(generator_cfg.anomaly_types)
    MLFlowHelper.init_experiment(cfg.gp.experiment_name)
    MLFlowHelper.log_params_from_omegaconf_dict(cfg, split_lists=False)


if __name__ == "__main__":
    # Route Hydra's single-run and multirun outputs to timestamped folders.
    hydra_conf = HydraConf(
        run=RunDir("./out/hydra/default/${now:%Y-%m-%d}/${now:%H-%M-%S}/"),
        sweep=SweepDir(
            dir="./out/hydra/multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}/",
            subdir="${hydra.job.num}",
        ),
    )
    store(hydra_conf)
    store.add_to_hydra_store()
    # Wrap `main` for Hydra, running `pre_call` once the config is resolved.
    task = zen(main, pre_call=pre_call)
    task.hydra_main(
        config_name="main",
        version_base="1.3",
        config_path=None,
    )
