import pandas as pd
import numpy as np
import math
import matplotlib.pyplot as plt
from scipy.spatial.kdtree import KDTree
from sklearn.cluster import DBSCAN

def readDataSet(filename, div=2):
    """Load a whitespace-separated numeric data file as a 2-D array.

    :param filename: path of the data file
    :param div: unused; kept for backward compatibility with existing callers
    :return: the loaded matrix
    """
    return np.loadtxt(filename)

def readDataSetwithLabel(filename, div=2):
    """Load a numeric data file whose last column is the class label.

    :param filename: path of the data file
    :param div: unused; kept for backward compatibility with existing callers
    :return: (features, label) — feature matrix with the label column removed,
             and the label column itself
    """
    raw = np.loadtxt(filename)
    label = raw[:, -1]
    features = np.delete(raw, -1, axis=1)
    return features, label

def readLabel(filename):
    """Read a headerless CSV file of labels into a numpy array.

    :param filename: path of the CSV file
    :return: 2-D numpy array of the file's contents
    """
    frame = pd.read_csv(filename, header=None)
    return np.asarray(frame)

def get_dist(location):
    """Compute the full pairwise Euclidean distance matrix.

    Vectorized replacement for the original O(n^2) Python double loop;
    produces the same values (uses broadcasting instead of np.linalg.norm
    per pair).

    :param location: (n, d) coordinate matrix
    :return: dist: (n, n) symmetric distance matrix with zero diagonal
    :return: ll: 1-D array of the upper-triangle distances, in row-major
             (begin, end) order — the same order the original loop produced
    """
    length = location.shape[0]
    diff = location[:, None, :] - location[None, :, :]
    dist = np.sqrt((diff ** 2).sum(axis=-1))
    # upper triangle in row-major order matches the original append order
    ll = dist[np.triu_indices(length, k=1)]
    return dist, ll

def sortLL(ll):
    """Return the pairwise distances sorted in ascending order."""
    ordered = np.sort(ll)
    return ordered

def get_dc(sortedll, p):
    """Pick the cutoff distance dc as the p-quantile of the sorted distances.

    :param sortedll: ascending-sorted 1-D array of pairwise distances
    :param p: fraction in [0, 1] selecting the position in the sorted list
    :return: the distance at position int(p * len(sortedll))

    Fixes: variable-name typo, and clamps the index so p == 1.0 returns the
    largest distance instead of raising IndexError (behavior for 0 <= p < 1
    is unchanged).
    """
    position = min(int(p * len(sortedll)), len(sortedll) - 1)
    return sortedll[position]

def get_rho(dist, dc):
    """Compute the local density of every point with a Gaussian kernel.

    rho[i] = sum over j != i of exp(-(dist[i][j] / dc)^2).

    Vectorized replacement for the original O(n^2) Python double loop; the
    diagonal contributes exp(0) == 1 per row, which is subtracted to exclude
    the point itself.

    :param dist: (n, n) pairwise distance matrix (zero diagonal)
    :param dc: cutoff distance (kernel bandwidth)
    :return: (n, 1) density column vector
    """
    length = dist.shape[0]
    rho = np.exp(-(dist / dc) ** 2).sum(axis=1) - 1.0
    return rho.reshape((length, 1))

def get_rho_sort_ind(rho):
    """Return point indices sorted by descending density.

    The original sorted ascending and then reversed the array with a manual
    swap loop; slicing with [::-1] produces exactly the same ordering
    (including tie order) with no explicit loop.

    :param rho: (n, 1) density column vector
    :return: (n, 1) int array of indices, densest point first
    """
    length = len(rho)
    ascending = rho.reshape(length).argsort()
    return ascending[::-1].reshape((length, 1))


def get_delta(rho_sorted_ind, dist):
    """Compute delta for every point and record its nearest denser neighbour.

    For the densest point, delta is its maximum distance to any other point
    (and it is its own 'nearest'). For every other point, delta is the
    minimum distance to any point of higher density, and `nearest` records
    which point achieved that minimum.

    :param rho_sorted_ind: (n, 1) indices sorted by descending density
    :param dist: (n, n) pairwise distance matrix
    :return: (delta, nearest) — both (n, 1); delta is float, nearest is int
    """
    length = dist.shape[0]
    delta = np.full((length, 1), np.inf)
    nearest = np.zeros((length, 1), dtype=int)

    # the globally densest point: delta = max distance to anyone
    top = rho_sorted_ind[0][0]
    delta[top] = dist[top].max()
    nearest[top] = top

    # everyone else: minimum distance to the points ranked denser
    for rank in range(1, length):
        i = rho_sorted_ind[rank][0]
        for prev in range(rank):
            j = rho_sorted_ind[prev][0]
            if dist[i][j] < delta[i]:
                delta[i] = dist[i][j]
                nearest[i] = j

    return delta, nearest

def plotRhoDelta(dir, rho, delta):
    """Draw the decision graph (rho vs delta), save it under `dir`, show it.

    :param dir: output directory; the image is written as decisiongraph.png
    :param rho: (n, 1) densities (x axis)
    :param delta: (n, 1) delta values (y axis)
    """
    plt.figure(figsize=(10, 10))
    plt.plot(rho, delta, 'o')
    plt.xlabel('rho')
    plt.ylabel('delta')
    plt.savefig(dir + '/decisiongraph.png')
    plt.show()

def get_Rsets(delta, dt):
    """Return the indices of points whose delta is at least dt (the R-set).

    :param delta: (n, 1) delta values
    :param dt: user-chosen delta threshold
    :return: 1-D array of point indices
    """
    mask = delta >= dt
    return np.nonzero(mask)[0]

def initial_clustering(ind, nearest, sorted_ind):
    """Seed one cluster per R-set point, then propagate labels down the
    density ordering: each unvisited point inherits the label of its nearest
    denser neighbour, provided that neighbour is already labelled.

    :param ind: indices of the R-set points (cluster seeds)
    :param nearest: (n, 1) nearest denser neighbour per point
    :param sorted_ind: (n, 1) indices sorted by descending density
    :return: (n, 1) int cluster labels; 0 means unassigned
    """
    length = len(nearest)
    result = np.zeros((length, 1), dtype=int)
    visited = np.zeros((length, 1), dtype=int)
    for label, seed in enumerate(ind, start=1):
        result[seed] = label
        visited[seed] = 1

    for rank in range(length):
        idx = int(sorted_ind[rank][0])
        neighbour = int(nearest[idx][0])
        if visited[idx] == 0 and result[neighbour] != 0:
            result[idx] = result[neighbour]
            visited[idx] = 1

    return result

def get_w(ind, rho, num):
    """Collect the R-set densities and compute the level width w.

    w = (max R-set density - min R-set density) / num.

    Vectorized replacement for the original element-by-element scan; the
    returned shapes are unchanged (rsets_rho is (len(ind), 1), w is (1,)).

    :param ind: indices of the R-set points
    :param rho: (n, 1) density column vector
    :param num: number of width units to divide the density range into
    :return: (rsets_rho, w)
    """
    rsets_rho = rho[ind].astype(float)
    w = (rsets_rho.max(axis=0) - rsets_rho.min(axis=0)) * 1.0 / num
    return rsets_rho, w

def get_gap(r_ind, rsets_rho, w, length):
    '''
    Partition the R-set into density levels by scanning the R-set densities
    in ascending order and starting a new level wherever the jump between two
    consecutive densities exceeds 2 * w.

    :param r_ind: global indices of the R-set points
    :param rsets_rho: (len(r_ind), 1) densities of the R-set points
    :param w: level width from get_w (used as the gap threshold, times 2)
    :param length: total number of points in the data set
    :return: rsets_rho_sorted_ind: ascending-density order within the R-set
    :return: gap: (len(r_ind)-1, 1) density jumps; 0 where no level boundary
    :return: level: (length, 1) per-point level id (only R-set points set here;
             0 means not assigned)
    :return: numl: the highest level id assigned
    '''
    r_length = len(r_ind)
    rsets_rho_sorted_ind = rsets_rho.reshape((1, r_length)).argsort().reshape((r_length, 1))
    gap = np.zeros((r_length-1, 1), dtype=float)
    level = np.zeros((length, 1), dtype=int)
    begin = 0
    count = 1
    for i in range(r_length - 1):
        # density jump between consecutive R-set points (ascending order)
        delta = (rsets_rho[rsets_rho_sorted_ind[i+1]] - rsets_rho[rsets_rho_sorted_ind[i]])[0][0]
        if delta > w * 2:
            gap[i] = delta
            # everything below this jump belongs to the current level
            while begin < i + 1:
                level[r_ind[rsets_rho_sorted_ind[begin]]] = count
                begin = begin + 1
            count = count + 1
            # special case: the jump sits right before the last R-set point
            if begin == r_length - 1:
                level[r_ind[rsets_rho_sorted_ind[begin]]] = count
        else:
            gap[i] = 0
    # NOTE(review): points above the last qualifying gap keep level 0 unless
    # that gap sits exactly at the end (begin == r_length - 1); confirm the
    # densest group is meant to stay unlevelled here.
    numl = np.max(level)
    return rsets_rho_sorted_ind, gap, level, numl

def assaign_level(level, initial, r_ind):
    """Spread each R-set point's level to every point sharing its initial
    cluster.

    Mutates `level` in place; returns None.

    :param level: (n, 1) per-point level ids (modified in place)
    :param initial: (n, 1) initial cluster labels
    :param r_ind: indices of the R-set points
    """
    for idx in r_ind:
        cluster_id = initial[idx]
        members = np.where(initial == cluster_id)
        level[members] = level[idx]

def clustering_when_numl_1(ind, nearest):
    """Clustering used when only one density level exists: seed one cluster
    per R-set point, then give each remaining point the label of its nearest
    denser neighbour (if that neighbour has one).

    :param ind: indices of the R-set points (cluster seeds)
    :param nearest: (n, 1) nearest denser neighbour per point
    :return: (n, 1) int cluster labels; 0 means unassigned
    """
    length = len(nearest)
    result = np.zeros((length, 1), dtype=int)
    visited = np.zeros((length, 1), dtype=int)
    for label, seed in enumerate(ind, start=1):
        result[seed] = label
        visited[seed] = 1

    for i in range(length):
        if visited[i] == 0 and result[nearest[i]] != 0:
            result[i] = result[nearest[i]]

    return result

def getKNN(matrix, k):
    '''
    Build the k-nearest-neighbour index table for every point.

    :param matrix: (n, d) coordinate matrix
    :param k: number of neighbours per point, excluding the point itself
        (floats are accepted and truncated — callers such as auto_snnc pass
        the result of np.ceil, which is a float)
    :return: ii: (n, k) index matrix of each point's k nearest neighbours
    '''
    # scipy.spatial.kdtree is deprecated/removed in modern SciPy; import the
    # public name locally so this function works regardless of the
    # module-level import.
    from scipy.spatial import KDTree
    kd_tree = KDTree(matrix)
    # query k+1 because each point's nearest neighbour is itself; KDTree.query
    # requires an integer k, hence the int() cast.
    _, ii = kd_tree.query(matrix, k=int(k) + 1)
    # drop the self-neighbour column
    return np.delete(ii, 0, axis=1)

def snnc(k_nearset):
    """Shared-nearest-neighbour clustering.

    Two points are connected when their k-NN lists share more than one
    neighbour; connected pairs are then greedily merged, in index order, into
    clusters. Points connected to nobody are all lumped into one final
    cluster labelled `count`.

    :param k_nearset: (n, k) neighbour index table
    :return: (result, count) — (n, 1) cluster labels, and the label assigned
             to the unconnected leftovers
    """
    length = k_nearset.shape[0]
    neighbour_sets = [set(row) for row in k_nearset]

    # adjacency: 1 where two points share more than one neighbour
    A = np.zeros((length, length), dtype=int)
    for a in range(length):
        for b in range(a + 1, length):
            if len(neighbour_sets[a] & neighbour_sets[b]) > 1:
                A[a][b] = 1
                A[b][a] = 1

    result = np.zeros((length, 1), dtype=int)
    isClustered = np.zeros((length, 1), dtype=int)
    count = 1
    for a in range(length):
        for b in range(a + 1, length):
            if A[a][b] != 1:
                continue
            if isClustered[a] == 0:
                # open a new cluster at the first unclustered endpoint
                isClustered[a] = 1
                result[a] = count
                count = count + 1
            result[b] = result[a]
            isClustered[b] = 1

    # points never connected to anyone share one extra cluster label
    result[result == 0] = count
    return result, count

def auto_snnc(kl, matrix):
    """Run SNN clustering with an automatically chosen neighbour count
    k = ceil(sqrt(kl)).

    :param kl: number of points used to derive k
    :param matrix: (n, d) coordinates of the points to cluster
    :return: the (result, count) pair from snnc

    Fix: np.ceil returns a float, but KDTree.query (inside getKNN) requires
    an integer neighbour count — cast to int.
    """
    k = int(np.ceil(np.sqrt(kl)))
    k_nearest = getKNN(matrix, k)
    return snnc(k_nearest)

def get_bp(cluster):
    """Flag the points belonging to under-sized clusters (size below the mean
    cluster size) so they can be reassigned later.

    :param cluster: (n, 1) cluster labels in 1..max(cluster)
    :return: bp: (n, 1) int array; 1 for every point in a small cluster

    Fix: the original set bp[i] = 1 where i is a *cluster* index, but the
    consumer (reassaign_bp) reads bp as per-*point* flags — mark the member
    points themselves instead.
    """
    counts = np.max(cluster)
    total = len(cluster)
    mean = (1.0 * total) / counts
    bp = np.zeros((total, 1), dtype=int)
    for i in range(counts):
        theCluster = i + 1
        points = np.where(cluster == theCluster)[0]
        if len(points) < mean:
            bp[points] = 1
    return bp

def reassaign_bp(result, bp, r_set, dist, level):
    """Reassign every flagged point to the cluster and level of its closest
    R-set point.

    Mutates `result` and `level` in place; returns None.

    :param result: (n, 1) cluster labels (modified in place)
    :param bp: (n, 1) flags from get_bp; 1 marks a point to reassign
    :param r_set: indices of the R-set points
    :param dist: (n, n) pairwise distance matrix
    :param level: (n, 1) level ids (modified in place)

    Fix: the original stored the *index* j into min_dist instead of the
    distance, so the chosen R-set point was not the closest one.
    """
    bp_points = np.where(bp == 1)[0]
    for i in bp_points:
        min_dist = np.inf
        for j in r_set:
            if dist[i][j] < min_dist:
                min_dist = dist[i][j]
                result[i] = result[j]
                level[i] = level[j]

def get_x_low(p, r_ind, rsets_rho_sorted_ind, gap):
    '''
    Return the global index of the lowest-density point of density level p.

    Walks the gap array (ascending density order) counting level boundaries
    until level p is reached; the point just above that boundary is the
    level's lowest-density member.

    :param p: level number (>= 2)
    :param r_ind: global indices of the R-set points
    :param rsets_rho_sorted_ind: ascending-density order within the R-set
    :param gap: per-position density jumps (0 where no boundary)
    :return: the global index (as a length-1 array, matching r_ind indexing)
    '''
    r_length = len(r_ind)
    pos = 0
    seen_levels = 1
    while pos < r_length - 1:
        if gap[pos] > 0:
            seen_levels += 1
            if seen_levels == p:
                break
        pos += 1
    return r_ind[rsets_rho_sorted_ind[pos + 1]]

def get_x_high(p, r_ind, rsets_rho_sorted_ind, gap):
    '''
    Return the global index of the highest-density point of density level p.
    (The original docstring said x_low — copy-paste slip.)

    First locates the boundary where level p starts (same walk as get_x_low),
    then advances to the next boundary (or the end of the R-set): the point
    just before it is the level's highest-density member.

    :param p: level number (>= 2)
    :param r_ind: global indices of the R-set points
    :param rsets_rho_sorted_ind: ascending-density order within the R-set
    :param gap: per-position density jumps (0 where no boundary)
    :return: the global index (as a length-1 array, matching r_ind indexing)
    '''
    r_length = len(r_ind)
    pos = 0
    seen_levels = 1
    while pos < r_length - 1:
        if gap[pos] > 0:
            seen_levels += 1
            if seen_levels == p:
                break
        pos += 1
    top = pos + 1
    if top >= r_length - 1:
        return r_ind[rsets_rho_sorted_ind[top]]
    while top < r_length - 1:
        if gap[top] > 0:
            break
        top += 1
    return r_ind[rsets_rho_sorted_ind[top]]

def get_c_low_num(result, x_low):
    """Size of the initial cluster that contains x_low."""
    return int(np.count_nonzero(result == result[x_low]))

def get_c_high_num(result, x_hgih):
    """Size of the initial cluster that contains x_high.

    (Parameter name `x_hgih` is a typo in the original; kept so keyword
    callers keep working.)
    """
    return int(np.count_nonzero(result == result[x_hgih]))

def get_x_far(result, x_low, dist):
    """Return the member of x_low's cluster farthest from x_low.

    Vectorized replacement for the original max-scan loop; np.argmax picks
    the first maximum, which matches the original strict-'>' comparison.

    :param result: (n, 1) cluster labels
    :param x_low: index of the reference point
    :param dist: (n, n) pairwise distance matrix
    :return: the index of the farthest same-cluster point
    """
    points = np.where(result == result[x_low])[0]
    return points[np.argmax(dist[x_low][points])]

def get_sim(p, level, x_far, dist):
    """Sorted distances from x_far to the other points of density level p.

    :param p: level number
    :param level: (n, 1) per-point level ids
    :param x_far: reference point index (expected to belong to level p)
    :param dist: (n, n) pairwise distance matrix
    :return: (len(points) - 1, 1) column vector of the distances, ascending
    """
    points = np.where(level == p)[0]
    length = len(points)
    values = [dist[i][x_far] for i in points if i != x_far]
    ordered = np.sort(np.array(values, dtype=float))
    return ordered.reshape((length - 1, 1))


def get_Eps(sim, c_low_num):
    """Pick Eps as the ceil(sqrt(c_low_num))-th smallest distance in sim.

    :param sim: sorted distance column vector from get_sim
    :param c_low_num: size of the low-density reference cluster
    :return: the chosen Eps value
    """
    rank = int(np.ceil(np.sqrt(c_low_num)))
    return sim[rank][0]

def get_min_pts_high(result, x_high, dist, eps):
    """Count the points in x_high's cluster (excluding x_high itself) that lie
    strictly closer than eps to x_high.
    """
    points = np.where(result == result[x_high])[0]
    return sum(1 for i in points if i != x_high and dist[x_high][i] < eps)

def get_min_pts_low(result, x_low, x_far, dist, eps):
    points = np.where(result == result[x_low])[0]
    min_pts = 0
    for i in points:
        if i != x_far:
            if dist[x_far][i] < eps:
                min_pts = min_pts + 1
    return min_pts

def get_min_pts(min_pts_low, min_pts_high):
    """Average of the two MinPts estimates.

    NOTE(review): true division can yield a non-integer (e.g. 1.5), and the
    value is later passed to DBSCAN(min_samples=...) — confirm an int is not
    required by the installed scikit-learn version.
    """
    return (min_pts_low + min_pts_high) / 2

def dbscan_with_param(pts, eps, matrix, begin):
    """Run DBSCAN and shift every non-noise label upward by `begin`.

    :param pts: min_samples for DBSCAN (NOTE(review): callers may pass a
        float from get_min_pts — confirm the installed scikit-learn accepts it)
    :param eps: DBSCAN eps
    :param matrix: (n, d) coordinates to cluster
    :param begin: offset added to every real cluster label, so labels from
        successive calls do not collide
    :return: (n, 1) label column vector; noise points stay -1
    """
    labels = DBSCAN(eps=eps, min_samples=pts).fit(matrix).labels_
    result = np.array(labels)
    # vectorized offset: shift real clusters, leave noise (-1) untouched
    result[result != -1] += begin
    return result.reshape((len(result), 1))

def get_co_ind(result):
    """Return the indices of the noise points (label -1)."""
    noise = result == -1
    return np.nonzero(noise)[0]

def get_ncd_num(result, first_begin):
    """Number of clusters created after the offset `first_begin`
    (highest label minus the offset)."""
    highest = np.max(result)
    return highest - first_begin

def get_new_center(result, rho):
    """For each cluster 1..max(result), pick its highest-density member as the
    cluster center.

    The inner max-scan loop is replaced with np.argmax, which returns the
    first maximum — the same tie behavior as the original strict-'>' scan.

    :param result: (n, 1) cluster labels
    :param rho: (n, 1) density column vector
    :return: (num_clusters, 1) int array of center point indices
    """
    count = np.max(result)
    centers = np.zeros((count, 1), dtype=int)
    for label in range(1, count + 1):
        points = np.where(result == label)[0]
        centers[label - 1] = points[np.argmax(rho[points])]
    return centers

def get_mc_count(centers, rho):
    """Count the 'micro' clusters — those whose center density lies below the
    mean center density — and collect their centers.

    :param centers: (k, 1) center point indices from get_new_center
    :param rho: (n, 1) density column vector
    :return: (mc_count, mc_centers) — the number of micro clusters and the
             list of their center entries

    Fix: the original iterated `for i in mc` (i is a float density row) and
    then indexed `centers[i]`, which raises IndexError; iterate positions
    with enumerate instead.
    """
    num = len(centers)
    mc = np.zeros((num, 1), dtype=float)
    for i in range(num):
        mc[i] = rho[centers[i]]
    mean_rho = np.mean(mc)
    mc_count = 0
    mc_centers = []
    for idx, density in enumerate(mc):
        if density < mean_rho:
            mc_count = mc_count + 1
            mc_centers.append(centers[idx])
    return mc_count, mc_centers

def re_assaign_mc(result, mc_centers, mc_count, ncd_num, dist):
    """If micro clusters are a minority (fewer than half of the new clusters),
    dissolve them: every point of a micro cluster joins the cluster of the
    closest point belonging to a normal (non-micro, non-noise) cluster.

    Mutates `result` in place; returns None.

    :param result: (n, 1) cluster labels (modified in place)
    :param mc_centers: list of micro-cluster center entries from get_mc_count
    :param mc_count: number of micro clusters
    :param ncd_num: number of newly created clusters
    :param dist: (n, n) pairwise distance matrix

    Fixes: the original combined array conditions with the Python `and`
    operator (raises ValueError on arrays) and compared `result` against a
    fancy-indexed `result[mc_centers]` with mismatched broadcasting; both are
    replaced with np.isin-based masks.
    """
    if mc_count >= ncd_num * 1.0 / 2:
        return
    if mc_count == 0:
        return
    # labels of the micro clusters
    mc_labels = result[np.asarray(mc_centers, dtype=int).ravel()].ravel()
    is_mc = np.isin(result, mc_labels)
    # normal-cluster points: not micro and not noise
    ncd_points = np.where(~is_mc & (result != -1))
    mc_points = np.where(is_mc)
    for i in mc_points[0]:
        min_dist = np.inf
        new_cluster = result[ncd_points[0][0]]
        for j in ncd_points[0]:
            if dist[i][j] < min_dist:
                min_dist = dist[i][j]
                new_cluster = result[j]
        result[i] = new_cluster

from sklearn.decomposition import PCA

def plotResult(matrix, result, filename):
    """Project the data to 2-D with PCA and scatter-plot the clustering.

    Noise points (label -1) are drawn in black; cluster labels cycle through a
    fixed palette of 19 colors and 5 markers. The figure is saved to
    `filename` and then displayed.

    :param matrix: (n, d) original coordinates
    :param result: (n, 1) cluster labels (-1 = noise)
    :param filename: output image path
    """
    pca = PCA(n_components=2)
    pca.fit(matrix)
    location = pca.transform(matrix)

    markers = ['.', '*', '+', 'x', '^']
    colors = ['maroon', 'red', 'peru', 'gold', 'olive', 'yellowgreen',
              'lawngreen', 'springgreen', 'turquoise', 'teal', 'deepskyblue',
              'dodgerblue', 'royalblue', 'navy', 'slategrey', 'orchid', 'm',
              'deeppink', 'crimson']
    plt.figure(figsize=(10, 10))

    for point, label in zip(location, result):
        index = int(label)
        if index == -1:
            plt.plot(point[0], point[1], color=(0, 0, 0), marker='.')
        else:
            plt.plot(point[0], point[1], color=colors[index % 19],
                     marker=markers[index % 5])
    plt.xlabel('Attribute 1')
    plt.ylabel('Attribute 2')

    plt.savefig(filename)
    plt.show()

def vdpc(input_file, is_with_label, output_dir, p = 0.5):
    """Run the full VDPC (variant density-peaks clustering) pipeline.

    :param input_file: whitespace-separated data file; the last column is the
        class label when is_with_label is truthy
    :param is_with_label: whether the file carries a trailing label column
    :param output_dir: directory where the decision graph image is written
    :param p: quantile used to pick the cutoff distance dc.
        NOTE(review): the argument is overwritten with 0.5 below, so the
        parameter currently has no effect — confirm whether intended.
    :return: the initial clustering array when numl == 1, otherwise the tuple
        (final_result, dt).
        NOTE(review): the two branches return differently-shaped values;
        callers that unpack a tuple will break on the numl == 1 branch.
    """
    if is_with_label:
        data, label = readDataSetwithLabel(input_file)
    else:
        data = readDataSet(input_file)
    # pairwise distances and the flattened list of all pair distances
    dist, ll = get_dist(data)
    length = data.shape[0]
    ll = sortLL(ll)
    # NOTE(review): overwrites the caller-supplied p argument
    p = 0.5
    dc = get_dc(ll, p)
    rho = get_rho(dist, dc)
    rho_sort_ind = get_rho_sort_ind(rho)
    delta, nearest = get_delta(rho_sort_ind, dist)
    # show the decision graph so the user can choose the delta threshold dt
    plotRhoDelta(output_dir, rho, delta)
    dt = float(input("请输入dt\n"))
    # dt = 5.5
    rset_ind = get_Rsets(delta, dt)
    initial_result = initial_clustering(rset_ind, nearest, rho_sort_ind)
    # split the R-set into density levels of width w = range / num
    num = 10
    rsets_rho, w = get_w(rset_ind, rho, num)
    rsets_rho_sorted_ind, gap, level, numl = get_gap(rset_ind, rsets_rho, w, length)
    assaign_level(level, initial_result, rset_ind)
    if numl == 1:
        # a single density level: the initial DPC-style clustering is final
        return initial_result
    else:
        # level 1 (sparsest) is clustered with shared-nearest-neighbour clustering
        level_1_points = np.where(level == 1)[0]
        level_1_matrix = data[level_1_points]
        kl = level_1_matrix.shape[0]
        level_1_result, level_1_count = auto_snnc(kl, level_1_matrix)
        # reassign points of under-sized level-1 clusters to the R-set
        bp = get_bp(level_1_result)
        reassaign_bp(initial_result, bp, rset_ind, dist, level)
        final_result = np.zeros((length, 1), dtype=int)
        final_result[level_1_points] = level_1_result
        level_count = level_1_count
        # each denser level is clustered with DBSCAN using auto-derived eps/MinPts
        for p in np.arange(2, numl+1):
            level_points = np.where(level == p)[0]
            level_matrix = data[level_points]
            x_low = get_x_low(p, rset_ind, rsets_rho_sorted_ind, gap)[0]
            x_high = get_x_high(p, rset_ind, rsets_rho_sorted_ind, gap)[0]
            c_low_num = get_c_low_num(initial_result, x_low)
            # NOTE(review): c_high_num is computed but never used below
            c_high_num = get_c_high_num(initial_result, x_high)
            x_far = get_x_far(initial_result, x_low, dist)
            sim = get_sim(p, level, x_far, dist)
            eps = get_Eps(sim, c_low_num)
            min_pts_high = get_min_pts_high(initial_result, x_high, dist, eps)
            min_pts_low = get_min_pts_low(initial_result, x_low, x_far, dist, eps)
            min_pts = get_min_pts(min_pts_low, min_pts_high)
            level_result = dbscan_with_param(min_pts, eps, level_matrix, level_count)
            level_count = np.max(level_result) + 1
            final_result[level_points] = level_result
        # merge step: dissolve micro clusters into their nearest normal cluster
        ncd_num = get_ncd_num(final_result, 0)
        centers = get_new_center(final_result, rho)
        mc_count, mc_centers = get_mc_count(centers, rho)
        re_assaign_mc(final_result, mc_centers, mc_count, ncd_num, dist)
        return final_result, dt


from getRI import rand_index
from getNMI import printNMI

if __name__ == '__main__':
    input_file = 'Jain.txt'
    output_file = 'vdpc_result.txt'
    output_img = 'vdpc_result.png'
    dim = 2
    is_with_label = 0
    # run the full VDPC pipeline (prompts interactively for dt)
    # NOTE(review): vdpc returns a single array when numl == 1, which would
    # break this tuple unpacking — confirm the multi-level branch is expected.
    result, dt = vdpc(input_file, is_with_label, './')
    matrix, label = readDataSetwithLabel(input_file, dim)
    plotResult(matrix, result, output_img)
    np.savetxt(output_file, result)

    # external evaluation of the saved result against the ground-truth labels
    nmi = printNMI("label.txt", output_file)
    ri = rand_index("label.txt", output_file)

    # archive a copy under a name encoding the chosen dt and the scores
    np.savetxt("dt={:.2f}_nmi={:.2f}_ri={:.2f}".format(dt, nmi, ri)+output_file, result)










