# SPDX-FileCopyrightText: 2024 German Aerospace Center (DLR.e.V.)
# SPDX-FileContributor: Ferdinand Rewicki <ferdinand.rewicki@dlr.de>
#
# SPDX-License-Identifier: Apache-2.0

"""edeniss2020 experiment."""

import pickle

import aeon.distances
import hydra
import mlflow
import numpy as np
from aeon.clustering import BaseClusterer, TimeSeriesKMeans
from hydra.conf import HydraConf, RunDir, SweepDir
from hydra_zen import ZenStore, make_custom_builds_fn, zen
from omegaconf import DictConfig
from tqdm import trange
from tslearn.clustering import TimeSeriesKMeans as TSLearnTimeSeriesKMeans
from tslearn.utils import to_time_series_dataset

from saai.data.edeniss_dataloader import load_anomalies_from_edeniss_ts_file
from saai.features import DenoisedFeatures
from saai.methods import SAAI, SKLearnSSC, TSLearnSSC
from saai.utils import MLFlowHelper, seed_rngs

builds = make_custom_builds_fn(populate_full_signature=True)
store = ZenStore(overwrite_ok=True)

# Experiment-wide parameters, exposed to hydra under the "gp" config group.
global_params = {
    "seed": 42,
    "sensor": "edeniss2020_ics",
    "experiment_name": "SAAI",
    "tags": {"run_id": "edeniss2020_ics", "try": 1},
    "k": list(range(2, 20)),  # candidate cluster counts to sweep over
    "dataset_path": "data",
}
# BUGFIX: "pakage" was a typo for hydra-zen's "package" keyword; with the typo
# the "_global_" placement (params resolvable from the config root) is lost.
global_store = store(global_params, name="global_params", group="gp", package="_global_")


# Feature-transform config group; "denoised" applies DenoisedFeatures with
# window size w=4 (the only transform registered here).
transform_store = store(group="transform")
transform_store(DenoisedFeatures, w=4, name="denoised")

# Clusterer config group: two interchangeable DTW k-means implementations.
# Both are registered as partials (zen_partial=True) so `main` can supply
# n_clusters=k at call time; random_state is interpolated from gp.seed.
clusterer_store = store(group="clusterer")
clusterer_store(
    TSLearnTimeSeriesKMeans,
    metric="dtw",
    max_iter=10,
    random_state="${gp.seed}",
    name="TSLearnKMeans",
    zen_partial=True,
)
clusterer_store(
    TimeSeriesKMeans,
    distance="dtw",
    max_iter=10,
    n_init=1,
    init_algorithm="kmeans++",
    averaging_method="ba",  # barycenter averaging
    random_state="${gp.seed}",
    name="AeonKMeans",
    zen_partial=True,
)


@store(
    name="main",
    hydra_defaults=[
        "_self_",
        {"gp": "global_params"},
        {"clusterer": "TSLearnKMeans"},
        {"transform": "denoised"},
        {"override hydra/launcher": "submitit_local"},
    ],
)
def main(gp, clusterer: BaseClusterer, transform):
    """Run the cluster-count selection experiment on edeniss2020 anomalies.

    For every k in the configured range, cluster the (transformed) anomaly
    time series, persist the clustering as a pickle/MLflow artifact, and
    score the partition with each scoring function. Afterwards, count per
    scoring function whether its best-scoring k equals the true number of
    distinct labels, and log that win count to MLflow.

    Parameters
    ----------
    gp
        Global experiment parameters (seed, sensor, k or k-range,
        dataset_path, ...).
    clusterer
        Partially-built clusterer factory; invoked with ``n_clusters=k``.
    transform
        Feature transform providing a ``transform(X)`` method.
    """
    seed_rngs(gp["seed"])
    # A scalar k in the config is allowed; normalize to a list.
    gp["k"] = [gp["k"]] if not isinstance(gp["k"], list) else gp["k"]
    scoring_functions = [SAAI, SKLearnSSC, TSLearnSSC]
    wins = {m: 0 for m in scoring_functions}

    X, y, dim_lookup, idx_lookup = load_anomalies_from_edeniss_ts_file(
        gp["sensor"], basepath=gp["dataset_path"]
    )
    X = [x.squeeze() for x in X]
    X = to_time_series_dataset(X)
    X = transform.transform(X)
    # Re-pad after the transform: series may have changed length.
    X = to_time_series_dataset(X)
    min_k = min(gp["k"])
    max_k = max(gp["k"])
    # Hoisted out of the loop: the hydra output dir is constant for the run.
    rundir = hydra.core.hydra_config.HydraConfig.get().runtime.output_dir

    scores = {sf: np.zeros((max_k - min_k + 1,)) for sf in scoring_functions}
    for k in trange(min_k, max_k + 1):
        _clusterer = clusterer(n_clusters=k)
        X_ = X
        # aeon's KMeans cannot handle the NaN padding produced by
        # to_time_series_dataset, so replace NaNs with zeros for it.
        if isinstance(_clusterer, aeon.clustering.TimeSeriesKMeans):
            X_ = np.nan_to_num(X)
        _clusterer.fit(X_)
        y_pred = _clusterer.predict(X_)

        artifact = f"{rundir}/clusters_k_{k}.pkl"
        # `with` guarantees the file is closed even if pickling fails.
        with open(artifact, "wb") as file:
            pickle.dump(
                {
                    "X": X_,
                    "clusters": y_pred,
                    # BUGFIX: was the literal string "y"; store the labels,
                    # matching the dim_lookup/idx_lookup entries below.
                    "y": y,
                    "dim_lookup": dim_lookup,
                    "idx_lookup": idx_lookup,
                },
                file,
            )
        mlflow.log_artifact(artifact)

        for sf in scoring_functions:
            scores[sf][k - min_k] = sf.scoring_function(
                X,
                y_pred,
                k,
                centroids=_clusterer.cluster_centers_.squeeze(),
                y_true=y,
                idx_lookup=idx_lookup,
                dim_lookup=dim_lookup,
            )
            mlflow.log_metric(sf.NAME, scores[sf][k - min_k], step=k)

    for sf in scoring_functions:
        # Best-scoring k; a "win" means it matches the true class count.
        k_hat = int(np.argmax(scores[sf])) + min_k
        wins[sf] += int(k_hat == len(np.unique(y)))
        # Persist the win count (previously computed but never recorded).
        mlflow.log_metric(f"win_{sf.NAME}", wins[sf])


def _pre_call(cfg: DictConfig) -> None:
    """Hydra pre-call hook: initialize MLflow and log the run configuration.

    Parameters
    ----------
    cfg : DictConfig
        The resolved hydra configuration for this run.
    """
    experiment = cfg.gp.experiment_name
    MLFlowHelper.init_experiment(experiment)
    MLFlowHelper.log_params_from_omegaconf_dict(cfg, split_lists=False)


if __name__ == "__main__":
    # Route hydra's run/sweep output under ./out/hydra with timestamped dirs.
    # Must be stored BEFORE add_to_hydra_store() so it takes effect.
    store(
        HydraConf(
            run=RunDir("./out/hydra/default/${now:%Y-%m-%d}/${now:%H-%M-%S}/"),
            sweep=SweepDir(
                dir="./out/hydra/multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}/",
                subdir="${hydra.job.num}",
            ),
        )
    )
    store.add_to_hydra_store()
    # Launch `main` through hydra; _pre_call sets up MLflow before each run.
    zen(main, pre_call=_pre_call).hydra_main(
        config_name="main",
        version_base="1.3",
        config_path=None,
    )
