# SPDX-FileCopyrightText: 2024 German Aerospace Center (DLR.e.V.)
# SPDX-FileContributor: Ferdinand Rewicki <ferdinand.rewicki@dlr.de>
#
# SPDX-License-Identifier: Apache-2.0

"""Generator method for anomaly datasets."""

from datetime import timedelta
from typing import Optional, Union

import numpy as np

from saai.data.synth_ics_dataset import SyntheticEdenIssDataset


def generate_synthetic_ics_anomaly_dataset(
    n_timeseries: int,
    dim: Union[int, list[int]],
    n_days: Union[int, list[int]],
    n_anomalies: Union[int, list[int], list[list[int]]],
    r_sync: Union[float, list[float]],
    n_classes: Optional[Union[int, list[int]]] = None,
    anomaly_types: Optional[Union[int, list[int], list[list[int]]]] = None,
    granularity: Optional[timedelta] = None,
    lags: Optional[Union[dict[int, int], list[dict[int, int]]]] = None,
    add_red_noise: bool = True,
    with_metadata: bool = True,
    randomize: bool = False,
):
    """Generate anomaly classification datasets.

    Yields one item per multivariate time series produced by
    ``SyntheticEdenIssDataset``, extracting the anomalous subsequences from
    each channel together with their labels and provenance.

    Parameters
    ----------
    n_timeseries : int
        Number of multivariate time series to generate.
    dim : int or list of int
        Number of channels per time series.
    n_days : int or list of int
        Length of each time series in days.
    n_anomalies : int or (nested) list of int
        Number of anomalies to inject (per series / per channel).
    r_sync : float or list of float
        Synchronisation ratio forwarded to the dataset generator.
    n_classes : int or list of int, optional
        Number of anomaly classes; only used when ``randomize`` is True.
    anomaly_types : int or (nested) list of int, optional
        Explicit anomaly types; only used when ``randomize`` is False.
    granularity : timedelta, optional
        Sampling granularity forwarded to the dataset generator.
    lags : dict or list of dict, optional
        Channel lags; defaults to ``{0: 0}`` (no lag) when omitted.
    add_red_noise : bool, default True
        Whether to add red noise to the generated series.
    with_metadata : bool, default True
        When True, yield lookup tables, average correlation and the dataset
        object in addition to ``(X, y)``.
    randomize : bool, default False
        Select ``generate_random`` (random anomaly classes) instead of
        ``generate`` (explicit anomaly types).

    Yields
    ------
    anomalies : list of np.ndarray, shape = (n_cases,), inner_shape = (n_timepoints,)
        The univariate anomalous subsequences extracted from the
        multivariate time series.
    labels : array-like, shape = (n_cases,)
        The anomaly type labels.
    dim_lookup : array-like, shape = (n_cases,)
        The original dimension of each anomaly (only if ``with_metadata``).
    idx_lookup : array-like, shape = (n_cases,)
        The start and end index of each anomaly within its dimension
        (only if ``with_metadata``).
    corr_avg : float
        The average correlation between the channels of the multivariate
        time series (only if ``with_metadata``).
    dataset : SyntheticEdenIssDataset
        The generated dataset (only if ``with_metadata``).

    """
    if lags is None:
        # Default: a single channel with zero lag.
        lags = {0: 0}

    if randomize:
        dataset = SyntheticEdenIssDataset.generate_random(
            n_timeseries,
            dim=dim,
            n_days=n_days,
            n_classes=n_classes,
            n_anomalies=n_anomalies,
            r_sync=r_sync,
            granularity=granularity,
            lags=lags,
            add_red_noise=add_red_noise,
        )
    else:
        dataset = SyntheticEdenIssDataset.generate(
            n_timeseries,
            dim=dim,
            n_days=n_days,
            anomaly_types=anomaly_types,
            n_anomalies=n_anomalies,
            r_sync=r_sync,
            granularity=granularity,
            lags=lags,
            add_red_noise=add_red_noise,
        )

    for mts, _anomalies, _labels in dataset:
        X = []
        y = []
        idx_lookup = []
        dim_lookup = []
        # mts.shape[1] is the channel count; _anomalies[d] holds
        # (start, end) index pairs for channel d.
        for d in range(mts.shape[1]):
            X += [np.array(mts[0][d][a[0] : a[1]]) for a in _anomalies[d]]
            y += _labels[d]
            idx_lookup += list(_anomalies[d])
            dim_lookup += [d] * len(_labels[d])

        if with_metadata:
            corr_avg = _average_correlation(mts)
            yield X, np.array(y), np.array(dim_lookup), np.array(
                idx_lookup
            ), corr_avg, dataset
        else:
            yield X, np.array(y)


def generate_synthetic_ics_anomaly_dataset_v2(
    n_timeseries: int,
    dim: Union[int, list[int]],
    n_days: Union[int, list[int]],
    n_anomalies: Union[int, list[int], list[list[int]]],
    r_sync: Union[float, list[float]],
    n_classes: Optional[Union[int, list[int]]] = None,
    anomaly_types: Optional[Union[int, list[int], list[list[int]]]] = None,
    granularity: Optional[timedelta] = None,
    lags: Optional[Union[dict[int, int], list[dict[int, int]]]] = None,
    add_red_noise: bool = True,
    with_metadata: bool = True,
    randomize: bool = False,
):
    """Generate anomaly classification datasets with random context padding.

    Variant of :func:`generate_synthetic_ics_anomaly_dataset` that extends
    each extracted anomalous subsequence by a random number of samples of
    surrounding context before its start and after its end.

    Parameters
    ----------
    n_timeseries : int
        Number of multivariate time series to generate.
    dim : int or list of int
        Number of channels per time series.
    n_days : int or list of int
        Length of each time series in days.
    n_anomalies : int or (nested) list of int
        Number of anomalies to inject (per series / per channel).
    r_sync : float or list of float
        Synchronisation ratio forwarded to the dataset generator.
    n_classes : int or list of int, optional
        Number of anomaly classes; only used when ``randomize`` is True.
    anomaly_types : int or (nested) list of int, optional
        Explicit anomaly types; only used when ``randomize`` is False.
    granularity : timedelta, optional
        Sampling granularity forwarded to the dataset generator.
    lags : dict or list of dict, optional
        Channel lags; defaults to ``{0: 0}`` (no lag) when omitted.
    add_red_noise : bool, default True
        Whether to add red noise to the generated series.
    with_metadata : bool, default True
        When True, yield lookup tables, average correlation and the dataset
        object in addition to ``(X, y)``.
    randomize : bool, default False
        Select ``generate_random`` (random anomaly classes) instead of
        ``generate`` (explicit anomaly types).

    Yields
    ------
    anomalies : list of np.ndarray, shape = (n_cases,), inner_shape = (n_timepoints,)
        The univariate anomalous subsequences (plus random context)
        extracted from the multivariate time series.
    labels : array-like, shape = (n_cases,)
        The anomaly type labels.
    dim_lookup : array-like, shape = (n_cases,)
        The original dimension of each anomaly (only if ``with_metadata``).
    idx_lookup : array-like, shape = (n_cases,)
        The start and end index of each anomaly (without padding) within
        its dimension (only if ``with_metadata``).
    corr_avg : float
        The average correlation between the channels of the multivariate
        time series (only if ``with_metadata``).
    dataset : SyntheticEdenIssDataset
        The generated dataset (only if ``with_metadata``).

    """
    if lags is None:
        # Default: a single channel with zero lag (consistent with
        # generate_synthetic_ics_anomaly_dataset).
        lags = {0: 0}

    if randomize:
        dataset = SyntheticEdenIssDataset.generate_random(
            n_timeseries,
            dim=dim,
            n_days=n_days,
            n_classes=n_classes,
            n_anomalies=n_anomalies,
            r_sync=r_sync,
            granularity=granularity,
            lags=lags,
            add_red_noise=add_red_noise,
        )
    else:
        dataset = SyntheticEdenIssDataset.generate(
            n_timeseries,
            dim=dim,
            n_days=n_days,
            anomaly_types=anomaly_types,
            n_anomalies=n_anomalies,
            r_sync=r_sync,
            granularity=granularity,
            lags=lags,
            add_red_noise=add_red_noise,
        )

    for mts, _anomalies, _labels in dataset:
        X = []
        y = []
        idx_lookup = []
        dim_lookup = []

        # Random amount of context (in samples) to include around each
        # anomaly. BUG FIX: np.random.uniform returned floats, which are
        # invalid as slice indices; draw integers in [0, 12) instead.
        before_start, after_end = np.random.randint(0, 12, size=2)
        for d in range(mts.shape[1]):
            X += [
                # Clamp the start at 0 so a small a[0] cannot become
                # negative and wrap around to the end of the series.
                np.array(mts[0][d][max(0, a[0] - before_start) : a[1] + after_end])
                for a in _anomalies[d]
            ]
            y += _labels[d]
            idx_lookup += list(_anomalies[d])
            dim_lookup += [d] * len(_labels[d])

        if with_metadata:
            corr_avg = _average_correlation(mts)
            yield X, np.array(y), np.array(dim_lookup), np.array(
                idx_lookup
            ), corr_avg, dataset
        else:
            yield X, np.array(y)


def _average_correlation(timeseries: np.ndarray) -> float:
    correlation_matrix = np.corrcoef(timeseries.squeeze())
    corr_triu = np.triu(correlation_matrix, k=1)
    corr_avg = corr_triu.sum() / len(corr_triu.nonzero()[1])

    return corr_avg
