from src.function import *
from mycluster.canopy import Canopy,show


if __name__ == '__main__':

    # Transaction ids that make up the test (comparison) set.
    # NOTE(review): currently empty, so the `compare` list built below is empty too.
    my_tx = [

    ]
    # 01 Read the log files, parse them, and build the transaction list.
    tx_list, ip_list = get_info_from_bcclient_log("data/1219/nohup_1220.out", "data/1219/ip_tx_timestamp.txt")

    # 02 For every transaction, convert each node's forwarding timestamp
    #    from an absolute timestamp to a relative one.
    change_relative_timestamp(tx_list)

    # 03 Compute each transaction's weight vector; ip ordering follows ip_list.
    compute_weight_vertor(tx_list, ip_list)

    print("个数:"+str(len(tx_list)))
    # 04 Compute the Pearson correlation coefficient matrix.
    print("05 计算皮尔逊相关系数阵")

    pearson = matrix_pearson(tx_list)

    # 06 Clustering (spectral co-clustering on the correlation matrix).
    my_SpectralCoclustering(pearson)

    print("---------------分割线-----------------------------------------------")
    # Extract the transactions that belong to the test (comparison) set.
    compare = [tx for tx in tx_list if tx["tx_id"] in my_tx]
    # (some random transactions could be mixed in here)

    # Collect the distinct ips seen across the comparison transactions,
    # preserving first-seen order.
    ip_list_compare = []
    for tx in compare:
        for node in tx["list"]:
            if node["ip"] not in ip_list_compare:
                ip_list_compare.append(node["ip"])
    # (was: a bare no-op `len(ip_list_compare)` — removed)

    # Recompute a Gaussian weight vector ("weight1") restricted to ip_list_compare.
    for tx in compare:
        tx["weight1"] = [0] * len(ip_list_compare)  # all-zero vector, one slot per ip

        # Not fully rigorous: since the log was read line by line, the entries in
        # tx["list"] are assumed already ordered by timestamp, so the median is
        # taken positionally instead of being computed.
        n = len(tx["list"])
        if n % 2 == 1:
            mid_timestamp = tx["list"][n // 2]["timestamp"]
        else:
            mid_timestamp = (tx["list"][n // 2]["timestamp"]
                             + tx["list"][n // 2 - 1]["timestamp"]) / 2
        # 0.8325546111577 = sqrt(-ln(0.5)); with this scaling the Gaussian weight
        # equals exactly 0.5 at the median timestamp.
        k = mid_timestamp / 0.8325546111577
        for node in tx["list"]:  # each node is one packet: {ip, timestamp}
            index = ip_list_compare.index(node['ip'])
            if k == 0:
                tx["weight1"][index] = 1.0
            else:
                tx["weight1"][index] = exp(-(node["timestamp"] * 1.0 / k) ** 2)

    # Build the feature matrix.
    # NOTE(review): this uses the full tx_list "weight" vectors, not the
    # "weight1" vectors computed just above for `compare` — confirm intended.
    matrix = [tx["weight"] for tx in tx_list]
    X = np.array(matrix)

    # Elbow-method test (disabled):
    #num=zhoubu(X)
    #print(num)

    # Canopy pre-clustering with fixed thresholds.
    t1 = 4
    t2 = 3
    gc = Canopy(X)
    gc.setThreshold(t1, t2)
    canopies = gc.clustering()
    print('Get %s initial centers.' % len(canopies))
    #show(canopies, X, t1, t2)

    # Grid-search (t1, t2) for the threshold pair yielding the fewest centers.
    # Constraint t1 > t2 is enforced by starting t1 at t2 + 0.1.
    min_centers = 9999999
    min_t1 = 0
    min_t2 = 0
    for t2 in np.arange(3.5, 7, 0.1):
        for t1 in np.arange(t2 + 0.1, 7.1, 0.1):
            gc = Canopy(X)
            gc.setThreshold(t1, t2)
            canopies = gc.clustering()
            print('Get %s initial centers.t1：%s，t2：%s' % (len(canopies), t1, t2))
            show(canopies, X, t1, t2)
            if len(canopies) < min_centers:
                min_t2 = t2
                min_t1 = t1
                min_centers = len(canopies)

    print("min_t1_t2:"+str(min_t1)+","+str(min_t2)+",最小分类数为：%s" % min_centers)

    print("05 计算皮尔逊相关系数阵")
    #pearson_compare=matrix_pearson(compare)
    print("06 谱协同聚类结果")
    #my_SpectralCoclustering(pearson_compare,7)
