import networkx as nx
import random
import re
import os
import numpy as np

# Debug helper: print all nodes of a loaded graph
# print(G.nodes())
def concatenate_vectors(vector_list, group_size=20):
    """Concatenate consecutive groups of vectors into single long vectors.

    Every run of ``group_size`` vectors is joined with ``np.concatenate``,
    so e.g. 40 four-dim vectors with the default group size become two
    80-dim vectors. A trailing partial group (fewer than ``group_size``
    vectors) is still emitted, just shorter — callers needing uniform
    shapes must pad or drop it.

    Args:
        vector_list: sequence of 1-D numpy arrays.
        group_size: number of consecutive vectors per output vector
            (default 20, the previously hard-coded value).

    Returns:
        list of 1-D numpy arrays, one per group; empty list for empty input.

    Raises:
        ValueError: if ``group_size`` is not positive.
    """
    if group_size <= 0:
        raise ValueError("group_size must be a positive integer")
    return [
        np.concatenate(vector_list[i:i + group_size])
        for i in range(0, len(vector_list), group_size)
    ]
# print(list(G.nodes(data=True)))

def innner_walk(G, node):
    """Random walk restricted to one process.

    From ``node``, repeatedly steps to a random successor whose 'time'
    attribute is not earlier than the current node's and whose 'graphNum'
    matches (same process graph). Stops when no such successor exists, or
    when the only remaining option is a self-loop (the loop node is then
    appended once).

    Returns the list of visited nodes, starting with ``node``.
    """
    walk = [node]
    current = node
    successors = list(G.successors(node))
    while successors:
        # Keep successors that move forward (or stay) in time and belong
        # to the same process graph as the current node.
        candidates = [
            s for s in successors
            if G.nodes[current]['time'] <= G.nodes[s]['time']
            and G.nodes[current]['graphNum'] == G.nodes[s]['graphNum']
        ]
        if not candidates:
            # Edges continue, but none respect time order within this process.
            return walk
        if candidates == [current]:
            # Only a self-loop remains: record it once and stop.
            walk.append(current)
            return walk
        current = random.choice(candidates)
        walk.append(current)
        successors = list(G.successors(current))
    return walk
        
def outer_walk(G, node):
    """Random walk across the whole graph (no process restriction).

    From ``node``, repeatedly steps to a random successor whose 'time'
    attribute is not earlier than the current node's. Stops when no
    time-respecting successor exists, or when the only remaining option
    is a self-loop (the loop node is then appended once).

    Returns the list of visited nodes, starting with ``node``.
    """
    walk = [node]
    current = node
    successors = list(G.successors(node))
    while successors:
        # Keep successors that do not go backwards in time.
        candidates = [
            s for s in successors
            if G.nodes[current]['time'] <= G.nodes[s]['time']
        ]
        if not candidates:
            # Time order ends here even though edges continue.
            return walk
        if candidates == [current]:
            # Only a self-loop remains: record it once and stop.
            walk.append(current)
            return walk
        current = random.choice(candidates)
        walk.append(current)
        successors = list(G.successors(current))
    return walk

def benign_walk(directory='/root/jx_codes/crateGraph/benign', walks_per_graph=10):
    """Generate random-walk "sentences" from every benign graph.

    Loads each ``*.gpickle`` graph in ``directory``, picks
    ``walks_per_graph`` random start nodes per graph, and records both an
    inner (same-process) and an outer walk from each start node. Digits
    are stripped from node names so that tokens like 'pid123' and
    'pid456' collapse to one vocabulary entry.

    Args:
        directory: folder containing benign ``.gpickle`` graphs
            (default: the original hard-coded path).
        walks_per_graph: number of random start nodes per graph.

    Returns:
        list[str]: space-joined walk token sequences (two per start node).
    """
    walk_path = []
    benign_files = [f for f in os.listdir(directory) if f.endswith('.gpickle')]
    for benign_graph in benign_files:
        G = nx.read_gpickle(os.path.join(directory, benign_graph))
        nodes = list(G.nodes())  # hoisted: was rebuilt on every iteration
        if not nodes:
            # An empty graph would crash random.choice; skip it instead.
            continue
        for _ in range(walks_per_graph):
            random_node = random.choice(nodes)
            # Same start node feeds both walk styles, as in the original.
            for walk_fn in (innner_walk, outer_walk):
                walk = walk_fn(G, random_node)
                walk_path.append(' '.join(re.sub(r'\d', '', item) for item in walk))
    return walk_path

def malicious_walk(directory='/root/jx_codes/crateGraph/malicious',
                   walks_per_graph=10, max_graphs=468):
    """Generate random-walk "sentences" from the malicious graph set.

    Same procedure as ``benign_walk``: for each ``*.gpickle`` graph,
    ``walks_per_graph`` random start nodes each produce one inner and one
    outer walk, with digits stripped from node names. At most
    ``max_graphs`` graph files are processed (the original hard-coded
    cap of 468, presumably to balance the two classes — confirm).

    Args:
        directory: folder containing malicious ``.gpickle`` graphs.
        walks_per_graph: number of random start nodes per graph.
        max_graphs: maximum number of graph files to process.

    Returns:
        list[str]: space-joined walk token sequences (two per start node).
    """
    walk_path = []
    malicious_files = [f for f in os.listdir(directory) if f.endswith('.gpickle')]
    # Slicing replaces the original manual counter + break after file 468.
    for malicious_graph in malicious_files[:max_graphs]:
        G = nx.read_gpickle(os.path.join(directory, malicious_graph))
        nodes = list(G.nodes())  # hoisted: was rebuilt on every iteration
        if not nodes:
            # An empty graph would crash random.choice; skip it instead.
            continue
        for _ in range(walks_per_graph):
            random_node = random.choice(nodes)
            # Same start node feeds both walk styles, as in the original.
            for walk_fn in (innner_walk, outer_walk):
                walk = walk_fn(G, random_node)
                walk_path.append(' '.join(re.sub(r'\d', '', item) for item in walk))
    return walk_path

# --- Corpus construction: random-walk "sentences" from both graph sets ---
benign_path = benign_walk()
print(len(benign_path))

malicious_path = malicious_walk()
print(len(malicious_path))
# --- Doc2Vec embedding of the walk sentences ---
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
# Tag the benign walks first (tag '0' = normal)
labels = ['0'] * len(benign_path)
tagged_data = [TaggedDocument(words=_d.lower().split(), tags=[labels[i]]) for i, _d in enumerate(benign_path)]

# Tag the malicious walks (tag '1' = malicious)
labels = ['1'] * len(malicious_path)
# Create TaggedDocument objects for the malicious paths and append them to tagged_data
for i, path in enumerate(malicious_path):
    tagged_data.append(TaggedDocument(words=path.lower().split(), tags=[labels[i]]))

# Train the Doc2Vec model
# NOTE(review): only two distinct tags ('0'/'1') are used, so gensim learns
# just two trained document vectors; the per-walk vectors used below come
# from infer_vector instead — confirm this tagging scheme is intentional.
model = Doc2Vec(vector_size=4, window=2, min_count=3, workers=4, epochs=100)
model.build_vocab(tagged_data)
model.train(tagged_data, total_examples=model.corpus_count, epochs=model.epochs)

# Convert every walk sentence to a vector (vector_size=4)
vectors_benign = [model.infer_vector(doc.lower().split()) for doc in benign_path]
vectors_malicious = [model.infer_vector(doc.lower().split()) for doc in malicious_path]

# Group every 20 walk vectors into one concatenated sample (see concatenate_vectors)
concatenated_benign = concatenate_vectors(vectors_benign)
concatenated_malicious = concatenate_vectors(vectors_malicious)

# --- Commented-out experiment: random forest with cross-validation ---
# from sklearn.ensemble import RandomForestClassifier
# from sklearn.model_selection import cross_val_score
# # Label the paths: benign vectors as 0 (normal), malicious vectors as 1 (malicious)
# labels = [0] * len(vectors_benign) + [1] * len(vectors_malicious)
# vectors = vectors_benign+vectors_malicious
# # Classify with a random forest
# clf = RandomForestClassifier(n_estimators=100)

# # Evaluate model performance with cross-validation
# scores = cross_val_score(clf, vectors, labels, cv=5)
# print(scores) 

# --- Commented-out experiment: random forest with a train/test split ---
# from sklearn.ensemble import RandomForestClassifier
# from sklearn.model_selection import train_test_split
# from sklearn.metrics import accuracy_score, classification_report

# # 1. Merge the data and create labels
# X = vectors_benign + vectors_malicious
# y = [0] * len(vectors_benign) + [1] * len(vectors_malicious)

# # 2. Split the data into training and test sets
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# # 3. Train a random forest
# clf = RandomForestClassifier(n_estimators=100, random_state=42)
# clf.fit(X_train, y_train)

# # 4. Evaluate on the test set
# y_pred = clf.predict(X_test)
# accuracy = accuracy_score(y_test, y_pred)

# print(f"Accuracy: {accuracy * 100:.2f}%")
# print("\nClassification Report:")
# print(classification_report(y_test, y_pred))


# --- Neural-network classifier on the concatenated walk embeddings ---
import tensorflow as tf
from sklearn.model_selection import train_test_split
import numpy as np

# Stack benign and malicious samples into one feature matrix
X = np.array(concatenated_benign + concatenated_malicious)
print(X.shape)

# Label 0 for benign samples, 1 for malicious samples
y = np.array([0] * len(concatenated_benign) + [1] * len(concatenated_malicious))
print(y.shape)

# Hold out 20% of the data for testing
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Build the model: three widening ReLU layers with dropout, sigmoid output
model = tf.keras.models.Sequential([
    tf.keras.layers.Dense(32, activation='relu'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(64, activation='relu'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(1, activation='sigmoid')
])

model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])

# Train the model
# NOTE(review): the test split doubles as the validation set here, so the
# per-epoch "validation" metrics are not independent of the final evaluation.
model.fit(X_train, y_train, epochs=32, batch_size=32, validation_data=(X_test, y_test))

# Evaluate on the held-out split
test_loss, test_acc = model.evaluate(X_test, y_test)
print("\nTest accuracy:", test_acc)
model.summary()
