import numpy as np
from scipy.stats import wasserstein_distance
from scipy.stats import pearsonr
from scipy import stats
from numpy import *
import math
from sklearn.cluster import KMeans
from sklearn.cluster import AgglomerativeClustering
from sklearn.cluster import DBSCAN
import pandas as pd
import os
from Cluster.cluster_stat import find_max,cluster_stat
from Cluster.score import *



def cos_sim(vector_a, vector_b):
    """
    Calculate the cosine similarity between two vectors.

    :param vector_a: vector a (1-D array-like)
    :param vector_b: vector b (1-D array-like)
    :return: cosine similarity in [-1, 1]
    """
    # np.mat is deprecated in modern NumPy; plain ndarrays with a dot
    # product compute the same value without the matrix subclass.
    a = np.asarray(vector_a, dtype=float).ravel()
    b = np.asarray(vector_b, dtype=float).ravel()
    num = float(np.dot(a, b))
    denom = np.linalg.norm(a) * np.linalg.norm(b)
    sim = num / denom
    return sim

def euclidean_distance(x1, x2, p=2):
    """
    Minkowski distance of order p between two vectors (Euclidean for p=2).

    :param x1: first vector (array-like; plain lists now accepted too)
    :param x2: second vector (array-like)
    :param p: order of the norm (default 2 -> Euclidean)
    :return: float distance
    """
    # Convert explicitly so plain Python sequences work, and avoid relying
    # on the file's `from numpy import *` shadowing the builtin `sum`.
    a = np.asarray(x1, dtype=float)
    b = np.asarray(x2, dtype=float)
    return float(np.sum(np.abs(a - b) ** p) ** (1.0 / p))

def manhatton_distance(x1, x2, p=1):
    """
    Manhattan (city-block) distance between two vectors (Minkowski p=1).

    :param x1: first vector (array-like; plain lists now accepted too)
    :param x2: second vector (array-like)
    :param p: order of the norm (default 1 -> Manhattan)
    :return: float distance
    """
    # Same explicit conversion as euclidean_distance: supports lists and
    # does not depend on the star-imported numpy `sum`.
    a = np.asarray(x1, dtype=float)
    b = np.asarray(x2, dtype=float)
    return float(np.sum(np.abs(a - b) ** p) ** (1.0 / p))
        
def qiebixuefu_distance(x1, x2):
    """Chebyshev distance: the largest absolute coordinate difference."""
    diff = x1 - x2
    return np.linalg.norm(diff, ord=np.inf)

def cos_distance(x1, x2):
    """Cosine distance (1 - cosine similarity) between two vectors."""
    dot = sum(x1 * x2)
    norm_product = sum(x1 ** 2) ** 0.5 * sum(x2 ** 2) ** 0.5
    return 1 - dot / norm_product

def Square(x, y):
    """Squared Euclidean distance between x and y."""
    diff = x - y
    return np.sum(diff * diff)

def Wasserstein(P, Q):
    """
    1-D Wasserstein (earth mover's) distance between two samples.

    :param P: first sample of observations
    :param Q: second sample of observations
    :return: float distance
    """
    # The previous version built an unused `dists` index list; removed.
    return stats.wasserstein_distance(P, Q)

def calcSBDncc(x, y, s):
    """
    Normalized cross-correlation between x and y at a non-negative
    integer shift s: only the overlap x[s:] vs y[:len-s] contributes.
    """
    assert len(x) == len(y)
    assert isinstance(s, int)
    n = len(x)
    cross = 0.0
    sq_x = 0.0
    sq_y = 0.0
    for i in range(n - s):
        xv = x[i + s]
        yv = y[i]
        cross += xv * yv
        sq_x += xv * xv
        sq_y += yv * yv
    # Normalize by the norms of the overlapping segments.
    return cross / (math.pow(sq_x, 0.5) * math.pow(sq_y, 0.5))
def calcSBD(x, y, s=None):
    """
    Shape-based distance (SBD) between two equal-length sequences.

    SBD = 1 - NCC, so values lie in [0, 2]; 0 means identical shape.

    :param x: first sequence
    :param y: second sequence, same length as x
    :param s: optional fixed shift. When None, the maximal NCC over shifts
              0 .. len(x)//2 - 1 is used; otherwise s is the externally
              supplied alarm delay.
    :return: float SBD value
    """
    assert len(x) == len(y)
    if s is None:  # PEP 8: compare to None with `is`, not `==`
        # 0.5: ensures that at least half of the data is used for the
        # correlation calculation.
        half = int(len(x) * 0.5)
        # NOTE(review): the best-shift index was computed but never used
        # in the original; dropped as dead code.
        ncc = max(calcSBDncc(x, y, i) for i in range(half))
    else:
        ncc = calcSBDncc(x, y, s)
    # sbd: data is 0-2, so returning ncc would be more appropriate for
    # explaining the correlation here.
    return 1 - ncc

def cluster_main(global_config, normal_data_length):
    """
    Cluster training embeddings into two groups (normal vs anomalous) and
    label each test embedding by its nearest cluster center.

    :param global_config: full configuration dict; the 'cluster' section
        supplies the .npz data paths and 'graph_embedding' supplies the
        embedding vector dimension
    :param normal_data_length: number of leading test snapshots to discard
    :return: list of 0/1 predicted cluster labels for the test data
    """
    cfg = global_config['cluster']
    train_npz = np.load(cfg['input_train_data_path'])
    test_npz = np.load(cfg['input_test_data_path'])
    train_data, train_label = train_npz['x'], train_npz['y']
    # Drop the leading normal_data_length snapshots from the test set.
    test_data, test_label = test_npz['x'][normal_data_length:], test_npz['y'][normal_data_length:]
    print(f"--cluster_main : train_data{train_data.shape} train_label:{train_label.shape}")
    print(f"--cluster_main : test_data{test_data.shape} test_label:{test_label.shape}")

    # Cluster the training data into two groups; cross-tabulating cluster
    # ids against ground-truth labels shows which cluster is the anomalous one.
    agg = AgglomerativeClustering(n_clusters=2).fit(train_data)
    train_pred = agg.labels_
    pred_01_sum = {0: {0: 0, 1: 0}, 1: {0: 0, 1: 0}}
    for cluster_id, truth in zip(train_pred, train_label):
        pred_01_sum[cluster_id][truth] += 1
    print(f"pred_01_sum:{pred_01_sum}")

    # Compute each cluster's center as the mean of its member vectors.
    members = {0: [], 1: []}
    for idx, cluster_id in enumerate(train_pred):
        members[cluster_id].append(idx)
    dim = global_config['graph_embedding']['output_vector_dimension']
    train_center = np.zeros((2, dim))
    for cluster_id, idx_list in members.items():
        train_center[cluster_id] = np.mean(train_data[idx_list], axis=0)

    # Classify every test vector by distance to the two cluster centers.
    test_pred_label = []
    for vec in test_data:
        dist0 = euclidean_distance(vec, train_center[0])
        dist1 = euclidean_distance(vec, train_center[1])
        test_pred_label.append(0 if dist0 < dist1 else 1)
    print(f"--cluster_main test_pred_label:{test_pred_label}")
    return test_pred_label

    # train_case_index = [0, 1,2,3,4,5,6,7]
    # test_case_index = [0, 1,2,3,4,5,6,7]
    # # train_index: all timestamp in training set cases.
    # train_point_index = []
    # test_point_index = []
    # normal_embedding_len = global_config['normal_window_size']
    # anomaly_embedding_len = global_config['after_window_size']
    # # case_time_interval = normal_embedding_len + anomaly_embedding_len
    # # 训练数据加入正常数据
    # train_point_index += [j for j in range(0, normal_embedding_len)]
    # for i in range(len(train_case_index)):
    #     train_item = [j for j in range(normal_embedding_len+train_case_index[i]*anomaly_embedding_len,normal_embedding_len+(train_case_index[i]+1)*anomaly_embedding_len)]
    #     train_point_index += train_item
    # for i in range(len(test_case_index)):
    #     test_item = [j for j in range(normal_embedding_len+test_case_index[i]*anomaly_embedding_len,normal_embedding_len+(test_case_index[i]+1)*anomaly_embedding_len)]
    #     test_point_index += test_item
    # train_data = []
    # test_data = []
    # train_label = []
    # test_label = []
    # for i in range(len(all_data)):
    #     if i in train_point_index:
    #         train_data.append(all_data[i])
    #         train_label.append(label[i])
    #     if i in test_point_index:
    #         test_data.append(all_data[i])
    #         test_label.append(label[i])

    # print(f"--cluster_main: train_data {len(train_data)} {np.array(train_data).shape} len(train_label) : {len(train_label)}")
    # print(f"--cluster_main: test_data {len(test_data)} {np.array(test_data).shape} len(train_label) : {len(test_label)}")

    # # 区分正常和异常
    # model_1 = AgglomerativeClustering(n_clusters=2).fit(train_data)
    # agg_labels_1 = model_1.labels_
    # # agg_labels_1 = [0 if i==0 else 1 for i in train_label]
    # # print(f"agg_labels_1:{agg_labels_1.shape}--{agg_labels_1.tolist()}")
    # print(f"cluster_main pred:{agg_labels_1}")
    # # print(f"train_label:{train_label}")
    # # for i in range(len(agg_labels_1)):
    # #     if agg_labels_1[i] == 0:
    # #         agg_labels_1[i] =1
    # #     else:
    # #         agg_labels_1[i] =0

#     n_cluster = global_config['cluster_num']
#     n_vector = global_config['graph_embedding']['output_vector_dimension'] # embedding vectors' dimension
#     num = [0 for i in range(n_cluster)]
#     cluster_center_1 = [np.array([0.0 for i in range(n_vector)]) for j in range(n_cluster)]
#     for i in range(len(train_data)):
#         cluster_center_1[agg_labels_1[i]] += train_data[i]
#         num[agg_labels_1[i]] += 1
#     for i in range(n_cluster):
#         if num[i]>0:
#             cluster_center_1[i] /= num[i]
#     cluster_center_type_1 = [0,1]

#     test_pred_label_1 = []
#     for i in range(len(test_data)):
#         temp_center_index = 0
#         temp_center_dist = 100000
#         for j in range(len(cluster_center_1)):
#             if euclidean_distance(test_data[i],cluster_center_1[j]) < temp_center_dist:
#                 temp_center_dist = euclidean_distance(test_data[i],cluster_center_1[j])
#                 temp_center_index = j
#         test_pred_label_1.append(cluster_center_type_1[temp_center_index])

#     anomaly_train_data = []
#     anomaly_train_label = []
#     anomaly_test_data = []
#     anomaly_test_label = []
#     normal_test_label = []
#     for i in range(len(agg_labels_1)):
#         if agg_labels_1[i] == 1:
#             anomaly_train_data.append(train_data[i])
#             anomaly_train_label.append(train_label[i])

#     for i in range(len(test_pred_label_1)):
#         if test_pred_label_1[i] == 1:
#             anomaly_test_data.append(test_data[i])
#             anomaly_test_label.append(test_label[i])
#         else:
#             normal_test_label.append(test_pred_label_1[i])
    
#     result = pd.DataFrame()
#     result['label']= test_label
#     # Failure type Diagnosis
#     n_cluster = global_config['cluster_num'] # cluster numbers
#     # agg_model=KMeans(n_clusters=n_cluster, random_state=0).fit(anomaly_train_data)
#     agg_model = AgglomerativeClustering(n_clusters=n_cluster).fit(anomaly_train_data)
#     # agg_model=DBSCAN(eps=0.07,min_samples=20).fit(anomaly_train_data)#eps is Radius.
#     agg_labels = agg_model.labels_
#     num = [0 for i in range(n_cluster)]
#     true_cluster_center = [np.array([0.0 for i in range(n_vector)]) for j in range(n_cluster)]
#     for i in range(len(anomaly_train_data)):
#         true_cluster_center[agg_labels[i]]+=anomaly_train_data[i]
#         num[agg_labels[i]]+=1
#     for i in range(n_cluster):
#         true_cluster_center[i] /= num[i]
        
#     dis = [100000 for i in range(n_cluster)] 
#     cluster_center = [np.array([0.0 for i in range(n_vector)]) for j in range(n_cluster)]
#     cluster_center_type = [-1 for i in range(n_cluster)]
#     for i in range(len(anomaly_train_data)):
#         if euclidean_distance(anomaly_train_data[i],true_cluster_center[agg_labels[i]]) < dis[agg_labels[i]]:
#             dis[agg_labels[i]]=euclidean_distance(anomaly_train_data[i],true_cluster_center[agg_labels[i]])
#             cluster_center[agg_labels[i]]=anomaly_train_data[i]
#             cluster_center_type[agg_labels[i]]=anomaly_train_label[i]

#     test_pred_label = []
#     for i in range(len(anomaly_test_data)):
#         temp_center_index = 0
#         temp_center_dist = 100000
#         for j in range(len(cluster_center)):
#             if euclidean_distance(anomaly_test_data[i],cluster_center[j]) < temp_center_dist:
#                 temp_center_dist = euclidean_distance(anomaly_test_data[i],cluster_center[j])
#                 temp_center_index = j
#         test_pred_label.append(cluster_center_type[temp_center_index])

#     pred = []
#     j=0
#     for i in range(len(test_pred_label_1)):
#         if test_pred_label_1[i]==1:
#             pred.append(test_pred_label[j])
#             j+=1
#         else:
#             pred.append(test_pred_label_1[i])
   
#     result['result'] = pred
#     if not os.path.exists(os.path.split(config["output_data_path"])[0]):
#         os.mkdir(os.path.split(config["output_data_path"])[0])
#     result.to_csv(config["output_data_path"],index=False)

#     # 10 snapshots to 1 result:
#     cluster_stat(config['output_data_path'],config['stat_output_path'], global_config)
    
#     # score:
#     # score_report(config['stat_output_path'],config['report_output_path'])

# if __name__ == '__main__':
#     cluster_main(None)