'''
refer to
Temporally-Weighted Hierarchical Clustering for Unsupervised Action Segmentation. CVPR2021
Efficient Parameter-free Clustering Using First Neighbor Relations. CVPR2019
'''

import numpy as np
from sklearn import metrics
import scipy.sparse as sp
import torch
import logging
logger = logging.getLogger(__name__)
from experiment import exp_plots, exp_factory

def get_feature_dist(feat, distance='euclidean'):
    """Return the (n, n) pairwise distance matrix of *feat* under *distance*.

    :param feat: (n, d) feature array.
    :param distance: metric name understood by sklearn's pairwise_distances.
    :return: symmetric (n, n) distance matrix.
    """
    pairwise = metrics.pairwise.pairwise_distances
    return pairwise(feat, feat, metric=distance)

def get_temporal_dist(feat):
    """Temporal proximity weights: exp(|i - j| / n) for an n-step sequence.

    Entries grow exponentially with the temporal gap between two time
    steps, so multiplying by this matrix penalizes linking frames that are
    far apart in time (the temporal weighting of TW-FINCH).

    :param feat: (n, d) array; only the number of rows n is used.
    :return: (n, n) symmetric float matrix with entries exp(|i - j| / n).
    """
    n = feat.shape[0]
    # Vectorized |i - j| gap matrix replaces the original per-row Python
    # loop (which was O(n^2) in interpreter time and left an unused `m`).
    idx = np.arange(n)
    gap = np.abs(idx[:, None] - idx[None, :])
    return np.exp(gap / n)

def calculate_adjacency_matrix(data, distance='euclidean'):
    """Build the first-neighbour graph of *data* under a temporally
    weighted distance.

    Each sample gets one directed edge to its nearest neighbour under
    feature-distance * temporal-distance.

    :param data: (n, d) feature array, rows ordered in time.
    :param distance: metric name for the feature-distance part.
    :return: (adj, dist) — adj is an (n, n) LIL sparse adjacency matrix
        with zero diagonal; dist is the combined distance matrix whose
        diagonal has been overwritten with a large constant.
    """
    num_samples = data.shape[0]
    # Combined affinity: feature distance modulated by temporal gap.
    combined = get_feature_dist(data, distance=distance) * get_temporal_dist(data)
    # Huge diagonal so a sample never selects itself as first neighbour.
    np.fill_diagonal(combined, 1e12)
    first_neighbor = np.argmin(combined, axis=1)
    # One edge per row: sample i -> its first neighbour.
    rows = np.arange(0, num_samples)
    weights = np.ones_like(first_neighbor, dtype=np.float32)
    graph = sp.csr_matrix((weights, (rows, first_neighbor)),
                          shape=(num_samples, num_samples))
    graph = graph + sp.eye(num_samples, dtype=np.float32, format='csr')
    graph = graph.tolil()
    graph.setdiag(0)
    return graph, combined

def count_graph_components(adj, dist_matrix, min_sim=None):
    """Count the weakly connected components of the first-neighbour graph.

    :param adj: sparse adjacency matrix (directed first-neighbour edges).
    :param dist_matrix: dense distance matrix matching adj's shape.
    :param min_sim: optional threshold; edges whose distance exceeds it
        are removed before counting (adj is modified in place, so it must
        be in an index-assignable format such as LIL).
    :return: (num_components, labels) — the number of weakly connected
        components and the length-N component label of every node.
    """
    if min_sim is not None:
        # Cut edges that are too distant to be trusted.
        too_far = np.where((dist_matrix * adj.toarray()) > min_sim)
        adj[too_far] = 0
    num_components, labels = sp.csgraph.connected_components(
        csgraph=adj, directed=True, connection='weak', return_labels=True)
    return num_components, labels


def merge_components(data, labels, merged_labels):
    """Propagate component labels to the samples and compute centroids.

    :param data: (N, d) sample features.
    :param labels: per-component labels from the latest graph partition.
    :param merged_labels: per-sample labels from the previous hierarchy
        level, or an empty sequence on the first level.
    :return: (per-sample labels for this level,
        (num_clusters, d) array of cluster centroids).
    """
    if len(merged_labels) == 0:
        # First level: one graph node per sample, labels apply directly.
        sample_labels = labels
    else:
        # Map every sample's previous cluster index to its new component.
        _, inverse = np.unique(merged_labels, return_inverse=True)
        sample_labels = labels[inverse]

    # Centroids via a prefix-sum trick: sort rows by label, cumulative-sum
    # them, and difference the running sums at the cluster boundaries.
    _, cluster_sizes = np.unique(sample_labels, return_counts=True)
    order = np.argsort(sample_labels)
    M = data[order, :]
    M = np.vstack((np.zeros((1, M.shape[1])), M))
    np.cumsum(M, axis=0, out=M)
    ends = np.cumsum(cluster_sizes)
    starts = np.insert(ends, 0, 0)[:-1]
    M = M[ends, :] - M[starts, :]
    M = M / cluster_sizes[:, None]
    return sample_labels, M

def hierarchicalsegment(data, distance='cosine', maxseg = 1,plotflag=False):
    '''
    Hierarchically partition a temporal sequence into segments (FINCH-style).

    Each round links every current cluster to its first neighbour under the
    temporally weighted distance, merges the resulting graph components,
    and recomputes centroids; it stops when the partition no longer shrinks.

    :param data: (L, D) array with one feature row per time step.
    :param distance: metric name for the feature-distance part.
    :param maxseg: number of final hierarchy levels to keep.
    :param plotflag: if True, plot the kept label sequences via exp_plots.
    :return: (merged_labels, num_components_list) — merged_labels holds the
        per-sample label columns of the kept levels and num_components_list
        the matching component counts (at most maxseg entries, limited by
        how many levels the recursion actually produced).
    '''

    # Level 0: first-neighbour graph over the raw time steps.
    adj, init_dist = calculate_adjacency_matrix(data, distance=distance)
    num_components, labels = count_graph_components(adj, dist_matrix=init_dist, min_sim=None)
    merged_labels, cluster_centriod = merge_components(data=data, labels=labels, merged_labels=[])
    #
    exit_clust = 2  # clusters removed by the last merge; loop while > 1
    merged_labels_ = merged_labels  # init label_idx
    k = 1
    num_components_list = [num_components]

    while exit_clust > 1:
        # Cluster the current centroids, then map the new component labels
        # back onto the original samples via merge_components.
        adj, init_dist = calculate_adjacency_matrix(cluster_centriod, distance=distance)
        num_components, labels = count_graph_components(adj, dist_matrix=init_dist, min_sim=None)
        merged_labels_, cluster_centriod = merge_components(data=data, labels=labels, merged_labels=merged_labels_)
        merged_labels = np.column_stack((merged_labels, merged_labels_))
        exit_clust = num_components_list[-1] - num_components # clusters removed this round
        num_components_list.append(num_components)
        if num_components == 1 or exit_clust < 1:
            '''assert len(
                num_components_list) >= topK + 1, 'topK= {} is not satisfied because num_components_list < {}'.format(
                topK, topK + 1)
            col_slice = slice(0, 0 + topK)'''
            # Keep the maxseg levels just before the final (degenerate) one.
            num_components_list = num_components_list[-1-maxseg:-1] # drop last, keep previous maxseg
            merged_labels = merged_labels[:, -1-maxseg:-1] # matching label columns
            if plotflag:
                exp_plots.plot_cluster_distribution(labels_sequence=merged_labels, rawdata=data)
            break
        k += 1

    return merged_labels, num_components_list


# get segment on each dim and return new augmented sample
def calculate_segment_feature(batch, args):
    """Segment every sequence in *batch* hierarchically and average features.

    For each sample and each hierarchy level, every segment of the sequence
    is replaced by (or reduced to) its mean feature vector.

    :param batch: (B, L, D) torch tensor.
    :param args: needs .seg_distance, .max_seg and .device.
    :return: (batch_seg_all, batch_seg_reduce) — two length-B lists of
        tensors: the segment-mean-broadcast sequences of all levels stacked
        along time, and the per-segment mean vectors stacked along time.
    """
    arr = batch.cpu().numpy()
    B, L, D = arr.shape
    seg_all_out = []
    seg_reduce_out = []
    for sample in arr:  # sample: (L, D)
        labels_per_level, level_counts = hierarchicalsegment(
            sample, distance=args.seg_distance, maxseg=args.max_seg)
        means_list = []
        broadcast_list = []
        for level in range(len(level_counts)):
            level_labels = np.array(labels_per_level[:, level])
            # Segment boundaries: positions where the label changes,
            # plus the two sequence ends.
            change = np.where(level_labels[1:] - level_labels[:-1] != 0)[0] + 1
            bounds = np.insert(change, [len(change), 0], [L, 0])
            averaged = np.zeros_like(sample)
            for start, stop in zip(bounds[:-1], bounds[1:]):
                seg_mean = np.mean(sample[start:stop, :], axis=0).reshape(1, -1)
                means_list.append(seg_mean)
                averaged[start:stop, :] = averaged[start:stop, :] + seg_mean
            broadcast_list.append(averaged)
        reduced = np.concatenate(means_list, axis=0)
        stacked = np.concatenate(broadcast_list, axis=0)
        seg_all_out.append(torch.from_numpy(stacked).to(args.device))
        seg_reduce_out.append(torch.from_numpy(reduced).to(args.device))
    return seg_all_out, seg_reduce_out


def fusion_prediction(batch, model, padding, args):
    """Predict on each sequence fused with its hierarchical segment means.

    For every sample, the per-segment mean vectors from all hierarchy
    levels are appended after the raw sequence along the time axis, and
    the fused sequence is passed through *model*.

    :param batch: (B, L, D) torch tensor.
    :param model: callable invoked as model(x, key_padding_masks=padding,
        src_masks=None); its output at index [1] is used as the prediction.
    :param padding: key-padding mask forwarded unchanged to the model.
    :param args: needs .seg_distance, .max_seg and .device.
    :return: model predictions for all samples, concatenated along dim 0.
    """
    batch = batch.cpu().numpy()
    B, L, D = batch.shape
    batch_seg_all = []  # NOTE(review): never filled or used below
    batch_seg_reduce = []  # NOTE(review): filled but not returned
    predlist = []
    for b in range(B):
        x_segments_reduced = []
        x_segments_all = []  # NOTE(review): collected but never concatenated
        x_ = batch[b] # L*D
        merged_labels, num_components_list = hierarchicalsegment(x_, distance=args.seg_distance, maxseg=args.max_seg)
        hier_num = len(num_components_list)
        #print('------------------------------', hier_num)
        for k in range(hier_num):
            x_new = np.zeros_like(x_)
            hierlabels = np.array(merged_labels[:, k])
            # Segment boundaries: indices where the label changes, plus 0 and L.
            diff = hierlabels[1:] - hierlabels[0:-1]
            idx = np.where(diff != 0)[0] + 1
            idx = np.insert(idx, [len(idx), 0], [L, 0])
            for j in range(len(idx)-1):
                x__ = x_[idx[j]: idx[j+1], :]
                avg = np.mean(x__, axis=0).reshape(1, -1)
                x_segments_reduced.append(avg)
                x_new[idx[j]: idx[j+1], :] = x_new[idx[j]: idx[j+1], :] + avg
            x_segments_all.append(x_new)

        x_segments_reduced = np.concatenate(x_segments_reduced, axis=0)
        #print(x_segments_reduced.shape)
        #x_segments_all = np.concatenate(x_segments_all, axis=0)
        # Fuse: raw sequence followed by all segment means along time.
        xf = torch.cat((torch.from_numpy(x_).to(args.device), torch.from_numpy(x_segments_reduced).to(args.device)), dim=0)
        xf.unsqueeze_(dim=0)
        #print(xf.shape)
        # Index [1] assumes the model returns (something, prediction) — TODO confirm.
        basic_pred = model(xf.float(), key_padding_masks=padding, src_masks=None)[1]
        #print(basic_pred)
        predlist.append(basic_pred)
        #batch_seg_all.append(torch.from_numpy(x_segments_all).to(args.device))
        batch_seg_reduce.append(torch.from_numpy(x_segments_reduced).to(args.device))
    #batch_seg_all = np.stack(batch_seg_all, axis=0)
    #batch_seg_reduce = np.stack(batch_seg_reduce, axis=0)
    #print(batch_seg_all.shape, batch_seg_reduce.shape)
    return torch.cat(predlist, dim=0)
    #return torch.from_numpy(batch_seg_all).to(args.device), torch.from_numpy(batch_seg_reduce).to(args.device)


if __name__ == '__main__':
    # Smoke test: segment one training sample and plot its hierarchy.
    import options

    args = options.Options().parse()
    dataset, newds, dataloader = exp_factory.data_factory(args, flag='TRAIN')
    #print('-------------------')
    #print(len(dataset))
    X = dataset[10]  # presumably (features, label, ...) — TODO confirm dataset item layout
    dataisdb = X[0].numpy()
    merged_labels, num_components_list = hierarchicalsegment(dataisdb, plotflag=True)

    #dataisdb = np.expand_dims(dataisdb, axis=0)
    #calculate_segment_feature(batch=dataisdb)
    '''#print(dataisdb.shape)
    merged_labels, num_components_list= hierarchicalsegment(dataisdb, plotflag=False, task=args.exp_task, topK=2)
    # data, distance='cosine', task='classification', topK=3, plotflag=False
    print(merged_labels)
    print(num_components_list)

    x_reduce, x_all = hierarchicalaverage(dataisdb, merged_labels)
    print(x_all.shape)'''









# NOTE(review): dead code — get_neighborhood_samples is disabled by being
# wrapped in a module-level string literal; kept here only as reference.
'''
def get_neighborhood_samples(ts_feat, distance='cosine', window_size=None, window_num=None, min_sim=None):
    
    adj, dist_matrix = calculate_adjacency_matrix(ts_feat, distance)  # build a ts feat over all time points
    n_components, labels = count_graph_components(adj=adj, dist_matrix=dist_matrix, min_sim=min_sim)  # calculate number of graph components, and label of each

    assert window_size < ts_feat.shape[0], 'Window size should be less than sequence length'
    max_idx = len(ts_feat) - 1 - window_size
    min_idx = 0 + 1 + window_size
    idxs = np.random.randint(low=min_idx, high=max_idx, size=window_num)
    samples = []
    for id in idxs:
        s = slice(id - window_size, id + window_size + 1)
        s_item = np.arange(s.start, s.stop)
        feats = ts_feat[s]
        groups = labels[s]
        samples.append((s_item, feats, groups))
    return samples
'''