import numpy as np
from sklearn.metrics import pair_confusion_matrix


def accuracy(labels_true, labels_pred):
    """Clustering purity: credit each predicted cluster with its majority
    ground-truth label and return the fraction of samples so matched.

    Both label arrays must contain non-negative integers
    (``np.bincount`` requirement) and have equal length.
    """
    true_arr = np.ravel(labels_true)
    pred_arr = np.ravel(labels_pred)
    # For every predicted cluster, count how many members carry its
    # most frequent ground-truth label.
    majority_counts = [
        np.bincount(true_arr[pred_arr == cluster]).max()
        for cluster in np.unique(pred_arr)
    ]
    return np.sum(majority_counts) / true_arr.shape[0]


def get_rand_index_and_f_measure(labels_true, labels_pred, beta=1.):
    """Compute Rand index, adjusted Rand index and pairwise F-beta score.

    Args:
        labels_true: ground-truth labels, one per sample.
        labels_pred: predicted cluster labels, one per sample.
        beta: weight of recall in the F-beta score (1.0 = F1).

    Returns:
        Tuple ``(ri, ari, f_beta)``.

    Also prints the pairwise confusion matrix (TP/FN/FP/TN) to stdout.

    Raises:
        ZeroDivisionError: if there are no positive pairs (tp + fp == 0
        or tp + fn == 0), e.g. when every cluster is a singleton.
    """
    (tn, fp), (fn, tp) = pair_confusion_matrix(labels_true, labels_pred)
    # Cast to Python ints: the pair counts scale as n*(n-1), so their
    # products in the ARI formula can overflow int64 for large inputs
    # (limitation noted in sklearn's pair_confusion_matrix docs).
    tn, fp, fn, tp = int(tn), int(fp), int(fn), int(tp)
    ri = (tp + tn) / (tp + tn + fp + fn)
    ari = 2. * (tp * tn - fn * fp) / ((tp + fn) * (fn + tn) + (tp + fp) * (fp + tn))
    p, r = tp / (tp + fp), tp / (tp + fn)
    f_beta = (1 + beta ** 2) * (p * r / ((beta ** 2) * p + r))
    print("-----------------------------")
    print("TP={}\t| FN={}".format(tp,fn))
    print("-----------------------------")
    print("FP={}\t| TN={}".format(fp, tn))
    print("-----------------------------")
    return ri, ari, f_beta

def persant_of_testTX_in_cluster_list(culster_list, test_tx_list):
    """Report how well the test transactions concentrate inside clusters.

    Args:
        culster_list: cluster id of every sample in the full data set.
        test_tx_list: cluster id of every sample in the test subset.

    Prints four metrics (returns nothing):
        1. "总准确率" (overall accuracy): len(test_tx_list) / count, where
           count is the number of full-set samples whose cluster id also
           occurs in the test subset.  NOTE(review): this ratio looks
           inverted (count / len(test_tx_list) would be the conventional
           accuracy); kept as-is — confirm intent with the author.
        2. "修改总准确率" (revised accuracy): over clusters in which the test
           samples are at least 50% of the cluster, test samples / total
           cluster sizes.
        3. "样本丢失率" (sample loss rate): test samples discarded because
           their cluster missed the 50% threshold.
        4. "测试分类中的错误样本占比": non-test samples inside accepted
           clusters, relative to (those + all test samples).

    Raises:
        ZeroDivisionError: if count is 0 or no cluster passes the threshold.
        KeyError: if a test cluster id does not occur in culster_list
        (matches the original behavior).
    """
    from collections import Counter

    # Hoist membership testing into a set: O(1) per lookup instead of a
    # linear scan of test_tx_list for every sample.
    test_ids = set(test_tx_list)
    count = sum(1 for li in culster_list if li in test_ids)
    print("总准确率{}".format((len(test_tx_list) + 0.0) / (count + 0.0)))

    # 2. Revised accuracy from per-cluster sizes.
    # dict(Counter(...)) keeps the original KeyError for unknown test ids.
    index_num_cluster = dict(Counter(culster_list))  # cluster id -> size in full set
    index_num_test = dict(Counter(test_tx_list))     # cluster id -> size in test set

    fenzi = 0  # accepted test samples (numerator)
    fenmu = 0  # total size of accepted clusters (denominator)
    lost = 0   # test samples lost because their cluster missed the 50% bar
    wrong = 0  # non-test samples inside accepted clusters
    for key, value in index_num_test.items():
        if value * 2 >= index_num_cluster[key]:
            fenzi += value
            fenmu += index_num_cluster[key]
            wrong += index_num_cluster[key] - value
        else:
            lost += value
    print("修改总准确率{}".format(fenzi / fenmu * 1.0))
    print("样本丢失率{}".format(lost / len(test_tx_list) * 1.0))
    print("测试分类中的错误样本占比{}".format(wrong / (wrong + len(test_tx_list)) * 1.0))


if __name__ == '__main__':
    # Toy example: 17 samples, 3 ground-truth classes vs 3 predicted clusters.
    y_pred = [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 2, 2, 2, 2, 2, 1]
    y_true = [0, 0, 0, 2, 0, 0, 1, 1, 1, 1, 0, 2, 2, 2, 2, 2, 0]
    # Cluster purity (a.k.a. clustering accuracy).
    purity = accuracy(y_true, y_pred)
    ri, ari, f_beta = get_rand_index_and_f_measure(y_true, y_pred, beta=1.)
    print("purity:{}\nri:{}\nari:{}\nf_measure:{}".format(purity, ri, ari, f_beta))
