# -*- coding: utf-8 -*-
"""Example of using kNN for outlier detection
"""
# Author: Yue Zhao <zhaoy@cmu.edu>
# License: BSD 2 clause

from __future__ import division
from __future__ import print_function

import os
import sys

# temporary solution for relative imports in case pyod is not installed
# if pyod is installed, no need to use the following line
# Use this module's real path (``__file__``), not the literal string
# "__file__": ``os.path.dirname("__file__")`` is always "", which made the
# appended path relative to the current working directory instead of the
# script's location.
sys.path.append(
    os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

from pyod.models.knn import KNN
from pyod.models.pca import PCA
from pyod.models.rgraph import RGraph
from pyod.models.lof import LOF
from pyod.models.cof import COF
from pyod.models.iforest import IForest
from pyod.models.kde import KDE
from pyod.models.copod import COPOD
# from pyod.models.suod import SUOD
from pyod.utils.data import generate_data

import numpy as np
from sklearn.svm import OneClassSVM
import warnings

warnings.filterwarnings("ignore")

# compute the pairwise Pearson correlation matrix between columns
def pairPearson(A, B, precompute_p1):
    """Pairwise Pearson correlation between the columns of A and B.

    ``precompute_p1`` must hold the cross-products ``sum_i A[i, j] * B[i, k]``
    stored at element ``[k, j]`` (e.g. ``np.einsum('ij,ik->kj', A, B)``), so
    the expensive matrix product can be maintained incrementally by callers.
    Entry ``[k, j]`` of the result is the correlation of column ``j`` of A
    with column ``k`` of B; non-finite entries (constant columns) become 0.
    """
    n_rows = B.shape[0]

    # Column sums are reused by both the numerator and the variance terms.
    col_sum_a = A.sum(0)
    col_sum_b = B.sum(0)

    # Numerator: N * sum(A*B) - sum(A) * sum(B), broadcast over column pairs.
    numerator = n_rows * precompute_p1 - col_sum_a * col_sum_b[:, None]

    # Scaled variances: N * sum(x^2) - (sum x)^2 for every column.
    var_a = n_rows * ((A ** 2).sum(0)) - col_sum_a ** 2
    var_b = n_rows * ((B ** 2).sum(0)) - col_sum_b ** 2

    corr = numerator / np.sqrt(var_a * var_b[:, None])

    # A constant column yields a zero variance and hence inf or nan; zero it.
    corr[~np.isfinite(corr)] = 0
    return corr

# time-sequence (streaming) outlier detection over spatial regions
def anormal_region_detection(region_obser, model_r = None, model_int = None):
    """Detect anomalous (time slot, region) pairs in streaming regional data.

    Maintains a rolling Pearson-correlation profile between region/source
    series, derives an individual anomaly score per series from correlation
    drops, aggregates the scores over nearby regions and consecutive slots,
    and flags anomalies with two one-class SVMs: one scoring a region's own
    data sources, one scoring its spatial/temporal context.

    Parameters
    ----------
    region_obser : dict
        Observation bundle. Keys read here (schema inferred from the reads
        below -- confirm against callers):
        - "data": 2-D array, shape (num_time_slots, number_region * number_source)
        - "distance": pairwise distances between region centers
        - "number_region", "number_source": int counts
        - "endTime": last time slot to scan (divided by MPS below)
        - "detect_start" (optional): first slot at which detection begins
    model_r, model_int : OneClassSVM-like models or None
        When None, fresh ``OneClassSVM(nu=0.1)`` models are created; the
        (re)fitted models are returned for incremental reuse.

    Returns
    -------
    (anomalies, model_r, model_int)
        ``anomalies`` is a (num_time_slots, number_region) 0/1 matrix.
    """
    # -- unpack the observation bundle --
    data = region_obser["data"]
    distance = region_obser["distance"]
    numRegion = region_obser["number_region"] # number of regions
    numSource = region_obser["number_source"] # number of data sources per region


    MPS = 1 # minutes per time slot
    numTime = data.shape[0] # number of time slots available

    if "detect_start" in region_obser: # slot at which detection starts
        detect_st =  region_obser["detect_start"]
    else:
        detect_st = 0

    endTime = region_obser["endTime"] // MPS

    # Params for algorithm
    alpha = 0.05 # expected proportion of anomalous regions per 60/MPS-slot window
    beta = 0.1 # proportion of candidate anomalous regions (coarse pre-filter)
    nDailyAnomaly_int = int(60 / MPS  * numRegion * alpha)
    nDailyAnomaly_r = int(60 / MPS  * numRegion * beta)
    t_delta = 2 # the number of consecutive time slots we want to check.
    corrThres = 0.95 # the threshold for historically similar regions
    lCorr = 60  // MPS  # use one hour of data for the Pearson correlation window
    R = 50 # regarded as neighbors if their centers are within distance R. 1 = 500m
    numNearPart = 2 # number of stacked neighborhood masks (self + within-R)

    # Stack two region masks: the region itself (identity) and its neighbors
    # whose centers lie within distance R.
    mNear = np.identity(numRegion)
    mNear = np.concatenate((mNear, (distance > 0) & (distance <= R)))
    # Per-mask neighbor counts, repeated to line up with the flattened
    # (source x t_delta) feature layout used in the loop below.
    sMNear = np.repeat(mNear.sum(axis=1), numSource*t_delta)
    mNearTile = np.tile(mNear, (1, numSource*t_delta))

    score_ind = np.zeros((numTime, numRegion*numSource)) # individual anomaly score per (t, region, source)
    score_r = np.zeros((numTime, numRegion)) + 100 # own-sources score; 100 is the "not scored yet" sentinel
    score_int = np.zeros((numTime, numRegion)) + 100 # context score (nearby regions + consecutive slots); 100 = unscored
    anomalies = np.zeros((numTime, numRegion))


    # Width of the context feature vector fed to model_int: t_delta - 1 past
    # slots plus numNearPart masks for the current slot, numSource values each.
    dVector = (t_delta - 1 + numNearPart) * numSource

    if model_r is None:
        model_r = OneClassSVM(nu=0.1)
    if model_int is None:
        model_int = OneClassSVM(nu=0.1)

    # Not enough history to fill one correlation window: nothing to detect.
    if numTime <= lCorr:
        return anomalies, model_r, model_int

    # Rolling training buffers for the two one-class models.
    train_r = np.zeros((0, numSource))
    train_int = np.zeros((0, dVector))
    tsTrain = 60 // MPS # keep one hour's worth of slots as training data
    nTrain = tsTrain * numRegion

    # First slot to scan: leave room for the correlation window and, when
    # possible, a warm-up period before detect_st to populate the buffers.
    st = max(detect_st - tsTrain, lCorr)

    trained = False
    # Cross-product matrix sum_i data[i, j] * data[i, k] over the current
    # correlation window, stored at [k, j]; updated incrementally below.
    p1 = np.einsum('ij,ik->kj', data[(st - lCorr):st, :], data[(st - lCorr):st, :])

    for ts in range(st, endTime):


        # -- update the rolling Pearson correlation --
        pp = np.nan_to_num(pairPearson(data[(ts - lCorr):ts, :], data[(ts - lCorr):ts, :], p1))
        # Slide the window one slot forward: add slot ts, drop slot ts - lCorr.
        p1 = p1 + data[ts, :] * data[ts, :][:, None]
        p1 = p1 - data[ts - lCorr, :] * data[ts - lCorr, :][:, None]
        pp_new = np.nan_to_num(pairPearson(data[(ts - lCorr + 1):(ts + 1), :], data[(ts - lCorr + 1):(ts + 1), :], p1))

        # Correlation drop between consecutive windows, kept only for pairs
        # that were strongly correlated and whose correlation decreased.
        pp_diff = pp - pp_new
        pp_diff[np.where(np.logical_or(pp < corrThres, pp_diff < 0))] = 0
        pp_diff = pp_diff * lCorr
        pp_tmp = np.array(pp)
        pp_tmp[np.where(pp < corrThres)] = 0

        # -- individual anomaly score for slot ts --
        # z-score of the latest observation against all history up to ts.
        scaledData = ((data[:(ts + 1), :] - data[:(ts + 1), :].mean(0)) / data[:(ts + 1), :].std(0))[-1]
        scaledData[np.isinf(scaledData)] = 0
        scaledData[np.isnan(scaledData)] = 0
        # Correlation-weighted average of similar series' z-scores; the sign
        # records whether this series sits above or below that average.
        weightedAvg = np.nan_to_num(
            np.sum(pp_tmp * np.tile(scaledData, (scaledData.shape[0], 1)), axis=1) / np.sum(pp_tmp, axis=1))
        sign = ((scaledData > weightedAvg).astype(int) - 0.5) * 2
        score_ind[ts, :] = sign * np.nan_to_num(np.sum(pp_tmp * pp_diff, axis=1) / np.sum(pp_tmp, axis=1))

        # -- build feature vectors: average score_ind over each neighborhood
        # mask for the last t_delta slots, then regroup per region --
        tmpX = (mNearTile * score_ind[(ts - t_delta + 1):(ts + 1), :].ravel()).reshape((-1, numRegion)).sum(axis=1)
        tmpX = np.nan_to_num(tmpX / sMNear)
        tmpX = tmpX.reshape(numNearPart, numRegion, t_delta, numSource).transpose([1, 2, 0, 3]).reshape((numRegion, -1))
        # Keep all masks for the current slot but only the "self" mask (index
        # 0) for earlier slots. NOTE(review): inferred from the index
        # arithmetic -- confirm against the accompanying paper/spec.
        tmpX = np.c_[tmpX[:, -numSource * numNearPart:], tmpX[:, :-numSource * numNearPart].reshape(
            (numRegion, t_delta - 1, numNearPart, numSource))[:, :, 0, :].reshape((numRegion, -1))]
        x_r = np.array(tmpX[:, 0:numSource])
        x_int = np.array(tmpX)

        # Append to the rolling training buffers, bounded to nTrain rows.
        train_r = np.r_[train_r, x_r][-nTrain:, :]
        train_int = np.r_[train_int, x_int][-nTrain:, :]
        if ts > detect_st:
          # Refit both models every 60/MPS slots, and on the first pass.
          if ts % (60 // MPS) == 0 or not trained:
            model_r.fit(train_r)
            model_int.fit(train_int)
            trained = True

          score_r[ts, :] = model_r.decision_function(x_r).flatten()
          score_int[ts, :] = model_int.decision_function(x_int).flatten()
          # Rank all (slot, region) scores of the trailing hour ascending;
          # for OneClassSVM a lower decision_function means more anomalous.
          argsort_r = score_r[(ts - 60 // MPS + 1):(ts + 1), :].flatten().argsort()
          argsort_int = score_int[(ts - 60 // MPS + 1):(ts + 1), :].flatten().argsort()

          # Candidates must rank inside both the beta budget of score_r and
          # the alpha budget of score_int.
          selected_int = argsort_int[np.where(np.in1d(argsort_int, argsort_r[0:nDailyAnomaly_r]))[0]][
                           0:nDailyAnomaly_int]
          # Keep only candidates that fall in the current slot and that were
          # actually scored (100 is the unscored sentinel).
          iAnomalies = selected_int[(selected_int // numRegion) == (60 // MPS - 1)] % numRegion
          iAnomalies = iAnomalies[score_int[ts, iAnomalies] != 100]
          anomalies[ts, iAnomalies] = 1
    return anomalies, model_r, model_int


def outlier_detection(data, clf_name='KNN'):
    """Fit a PyOD outlier detector on ``data`` and return its training output.

    Parameters
    ----------
    data : array-like of shape (n_samples, n_features)
        Training samples.
    clf_name : str, default 'KNN'
        One of 'KNN', 'PCA', 'R-graph', 'LOF', 'COF', 'IForest', 'KDE',
        'COPOD'.

    Returns
    -------
    y_train_pred : ndarray
        Binary training labels (0: inlier, 1: outlier).
    y_train_scores : ndarray
        Raw outlier scores of the training data.
    clf : object
        The fitted detector.

    Raises
    ------
    NotImplementedError
        If ``clf_name`` is not a recognized algorithm name.
    """
    # Lazy dispatch table: the lambdas defer construction so only the
    # selected detector class is ever evaluated.
    factories = {
        'KNN': lambda: KNN(),
        'PCA': lambda: PCA(n_components=3),
        'R-graph': lambda: RGraph(n_nonzero=100, transition_steps=20, gamma=50,
                                  blocksize_test_data=20, tau=1,
                                  preprocessing=True, active_support=False,
                                  gamma_nz=False, algorithm='lasso_lars',
                                  maxiter=100, verbose=1),
        'LOF': lambda: LOF(),
        'COF': lambda: COF(n_neighbors=30),
        'IForest': lambda: IForest(),
        'KDE': lambda: KDE(),
        'COPOD': lambda: COPOD(),
    }

    if clf_name not in factories:
        # Fixed typo in the message: "algorithn" -> "algorithm".
        print("{} isn't a valid algorithm name!".format(clf_name))
        raise NotImplementedError

    clf = factories[clf_name]()
    clf.fit(data)
    # get the prediction labels and outlier scores of the training data
    y_train_pred = clf.labels_  # binary labels (0: inliers, 1: outliers)
    y_train_scores = clf.decision_scores_  # raw outlier scores
    return y_train_pred, y_train_scores, clf


# from pyod.utils.example import visualize

if __name__ == "__main__":
    contamination = 0.1  # percentage of outliers
    n_train = 200  # number of training points
    n_test = 400  # number of testing points

    # Generate sample data
    X_train, X_test, y_train, y_test = \
        generate_data(n_train=n_train,
                      n_test=n_test,
                      n_features=6,
                      contamination=contamination,
                      random_state=42)

    # train kNN detector
    clf_name = 'SUOD'
    label, score, model = outlier_detection(X_train, clf_name)

    print(label, y_train)