#%%
import numpy as np # 线性代数
import pandas as pd # 数据处理，CSV文件I/O（例如 pd.read_csv）

import os
#%%
# Load the pre-built first-order transaction table and discard the stray
# index column that was written out with the CSV.
first_order_df = pd.read_csv('data/first_order_df.csv')
first_order_df = first_order_df.drop(columns=['Unnamed: 0'])
first_order_df
#%%
# Tally how often each address appears on either side of a transaction.
from_counts = first_order_df['From'].value_counts()
to_counts = first_order_df['To'].value_counts()
combined_counts = from_counts.add(to_counts, fill_value=0)

# Keep only addresses involved in between 5 and 500 transactions (inclusive).
activity_mask = (combined_counts >= 5) & (combined_counts <= 500)
filtered_counts = combined_counts.loc[activity_mask]

# Restrict the edge list to rows where BOTH endpoints survived the filter.
values_to_keep = filtered_counts.index.tolist()
keep_rows = first_order_df['From'].isin(values_to_keep) & first_order_df['To'].isin(values_to_keep)
filtered_df = first_order_df[keep_rows].reset_index(drop=True)
filtered_df
#%%
# Class balance of the error flag over the *unfiltered* transactions
# (notebook cell: the bare expression displays the counts).
first_order_df['isError'].value_counts()
#%%
import random
import networkx as nx

# Build a directed transaction graph: one edge per (From, To) row.
graph = nx.DiGraph()

# Iterate the two columns in lockstep.  The original
# `for i in range(len(filtered_df)): filtered_df['From'][i]` form performed
# a label lookup (and chained indexing) on every iteration; zip over the
# column Series adds the same edges in the same order without it.
for src, dst in zip(filtered_df['From'], filtered_df['To']):
    graph.add_edge(src, dst)
#%%
from tqdm import tqdm
walk_length = 10

def get_randomwalk(node, path_length):
    """Sample a non-revisiting random walk of at most `path_length` nodes.

    Starting from `node`, repeatedly hop to a uniformly chosen successor
    that has not been visited yet, and stop early when no unvisited
    successor remains.  Relies on the module-level `graph` (nx.DiGraph)
    and the `random` module.
    """
    walk = [node]
    current = node
    while len(walk) < path_length:
        candidates = list(set(graph.neighbors(current)) - set(walk))
        if not candidates:
            break
        current = random.choice(candidates)
        walk.append(current)
    return walk
#%%
# Snapshot of every vertex in the transaction graph; walks start from each.
all_nodes = list(graph.nodes())
#%%
walk_per_node = [100, 200, 300]
dictionaries = {}
# NOTE(review): `random_walks` is re-created on every pass, so after this
# loop it holds only the walks of the *last* configuration (300 per node);
# the Word2Vec cell below trains on that corpus — confirm this is intended.
for idx, n_walks in enumerate(walk_per_node, start=1):
    stats = {}
    random_walks = []
    for n in tqdm(all_nodes):
        for _ in range(n_walks):
            random_walks.append(get_randomwalk(n, walk_length))
    # Total number of visited nodes across all sampled walks
    # (replaces the manual accumulation loop with sum()).
    total_edges = sum(len(walk) for walk in random_walks)
    stats['Labelled Nodes'] = n_walks
    stats['Edges'] = total_edges
    stats['Average Degree'] = total_edges / (n_walks * len(all_nodes))
    dictionaries['D' + str(idx)] = stats
#%%
# Display the walk statistics gathered for each walks-per-node setting.
dictionaries
#%%
from gensim.models import Word2Vec
# Skip-gram (sg=1) with negative sampling (hs=0, 10 negatives); the random
# walks play the role of sentences and graph nodes the role of words.
model = Word2Vec(window = 4, sg = 1, hs = 0, negative = 10, alpha=0.03, min_alpha=0.0007, seed = 14)
# NOTE(review): `random_walks` holds only the last configuration's corpus
# (300 walks per node) — confirm that is the intended training set.
model.build_vocab(random_walks, progress_per=2)
model.train(random_walks, total_examples = model.corpus_count, epochs=20, report_delay=1)

# Trained keyed vectors (node embeddings); displayed, not stored.
model.wv
#%%
from tqdm import tqdm

# Degree views over the directed graph (networkx DegreeView objects,
# indexable by node).
degrees = graph.degree()
in_degrees = graph.in_degree()
out_degrees = graph.out_degree()

data = []  # per-node feature dicts, filled by the next cell
nodes = graph.nodes()
#%%
for node in tqdm(list(nodes)):
    # --- degree-based features -------------------------------------------
    degree = degrees[node]
    in_degree = in_degrees[node]
    out_degree = out_degrees[node]
    in_degree_ratio = in_degree / out_degree if out_degree != 0 else float('inf')
    out_degree_ratio = out_degree / in_degree if in_degree != 0 else float('inf')

    # --- transaction-value features --------------------------------------
    # Hoist the row selections: each boolean mask over filtered_df is a full
    # scan and was previously recomputed for every column access.
    from_rows = filtered_df.loc[filtered_df['From'] == node]
    to_rows = filtered_df.loc[filtered_df['To'] == node]
    transfer_out = from_rows['Value'].sum()
    transfer_in = to_rows['Value'].sum()
    transaction = transfer_out + transfer_in
    transaction_diff = transfer_in - transfer_out
    transaction_ratio = transfer_in / transfer_out if transfer_out != 0 else float('inf')
    transfer_in_ratio = transfer_in / in_degree if in_degree != 0 else float('inf')
    transfer_out_ratio = transfer_out / out_degree if out_degree != 0 else float('inf')
    neighbours = len(set(graph.neighbors(node)))

    # --- temporal features ------------------------------------------------
    # Sum of successive timestamp gaps (in DataFrame row order), averaged
    # over the node's transaction count.
    timestamp_diff = from_rows['TimeStamp'].diff().sum() + to_rows['TimeStamp'].diff().sum()
    avg_timestamp_diff = timestamp_diff / (len(from_rows) + len(to_rows))
    # BUGFIX: guard the division like every other ratio above — a node with
    # identical timestamps (or a single row per side, whose diff sums to 0)
    # previously raised ZeroDivisionError here.
    inv_timestamp_freq = 1 / avg_timestamp_diff if avg_timestamp_diff != 0 else float('inf')
    node_dict = {"Node": node,
                 "Total Degree": degree,
                 "Out-Degree": out_degree,
                 "In-Degree": in_degree,
                 "Out-Degree Ratio": out_degree_ratio,
                 "In-Degree Ratio": in_degree_ratio,
                 "Sum of Transactions": transaction,
                 "Transfer-Out Transaction": transfer_out,
                 "Transfer-In Transaction": transfer_in,
                 "Transaction Difference": transaction_diff,
                 "Transaction_Ratio": transaction_ratio,
                 "Transfer-In Ratio": transfer_in_ratio,
                 "Transfer-Out Ratio": transfer_out_ratio,
                 "Number of Neighbours": neighbours,
                 "Inverse Timestamp Frequency": inv_timestamp_freq
                 }
    data.append(node_dict)
#%%
timestamp_nodes = []
# For every node (same order as `data`), collect the timestamps of all
# transactions it participates in: outgoing first, then incoming.
# (Iterates `data` directly instead of `range(len(data))`.)
for entry in tqdm(data):
    out_ts = list(filtered_df[filtered_df['From'] == entry['Node']]['TimeStamp'])
    in_ts = list(filtered_df[filtered_df['To'] == entry['Node']]['TimeStamp'])
    timestamp_nodes.append(out_ts + in_ts)
    
#%%
 nodes = [
    [node['Total Degree'], node['Out-Degree'], node['In-Degree'],
     node['Out-Degree Ratio'], node['In-Degree Ratio'], node['Sum of Transactions'],
     node['Transfer-Out Transaction'], node['Transfer-In Transaction'],
     node['Transaction Difference'], node['Transaction_Ratio'],
     node['Transfer-In Ratio'], node['Transfer-Out Ratio'],
     node['Number of Neighbours'], node['Inverse Timestamp Frequency']]
    for node in data
]

edges = [
    [ node['Out-Degree'], node['In-Degree'],
     node['Out-Degree Ratio'], node['In-Degree Ratio'],
     node['Transfer-Out Transaction'], node['Transfer-In Transaction'],
     node['Transfer-In Ratio'], node['Transfer-Out Ratio']]
    for node in data
]
#%%
import torch
import torch.nn as nn
import torch.nn.functional as F
import gc
# Reclaim memory from the (now unreferenced) intermediates before building
# the torch models.
gc.collect()
#%%
class TGATLayer(nn.Module):
    """Single attention layer over concatenated node+edge features.

    NOTE(review): `forward` reads the module-level global `hidden_dim`
    (not the constructor argument) when reshaping Q/K/V and when applying
    `Wout`, so the layer only works while the notebook global is in scope
    and consistent — confirm whether this is intentional.
    """
    def __init__(self, input_dim, edge_dim, hidden_dim, num_heads):
        super(TGATLayer, self).__init__()
        self.num_heads = num_heads
        # Linear projections for query, key and value.
        self.Wq = nn.Linear(input_dim + edge_dim, hidden_dim * num_heads)
        self.Wk = nn.Linear(input_dim + edge_dim, hidden_dim * num_heads)
        self.Wv = nn.Linear(input_dim + edge_dim, hidden_dim * num_heads)

        # Output linear projection.
        # NOTE(review): when hidden_dim == 1 the projection is immediately
        # replaced by one with transposed in/out sizes — this looks like a
        # workaround for the argument-order mismatch in
        # PDTGAWithGraphAttention's TGATLayer(...) call; verify before reuse.
        self.Wout = nn.Linear(hidden_dim, hidden_dim * num_heads)
        if (self.Wout).in_features == 1:
          self.Wout = nn.Linear(hidden_dim * num_heads, hidden_dim)
    def forward(self, node_features, edge_features, timestamps_list, num_nodes):
        # Concatenate node and edge features along the feature axis.
        Z = torch.cat([node_features, edge_features], dim=1)
        # Linear projections, reshaped with the *global* hidden_dim.
        Q = self.Wq(Z).view(-1, num_nodes, hidden_dim)
        K = self.Wk(Z).view(-1, num_nodes, hidden_dim)
        V = self.Wv(Z).view(-1, num_nodes, hidden_dim)

        # Stack the per-node timestamps along a new dimension.
        # NOTE(review): `timestamps_tensor` is never used afterwards — the
        # temporal signal does not reach the attention computation.
        timestamps_tensor = torch.stack(timestamps_list, dim=1)

        # Scaled dot-product attention.
        attention_scores = torch.matmul(Q, K.transpose(1, 2)) / torch.sqrt(torch.tensor(hidden_dim, dtype=torch.float32))
        attention_weights = F.softmax(attention_scores, dim=-1)
        # Replace NaNs (e.g. rows whose scores degenerated) with zeros.
        attention_weights = torch.nan_to_num(attention_weights, nan=0)
        V = torch.nan_to_num(V, nan=0)
        attended_values = torch.matmul(attention_weights, V)
        attended_values = attended_values.mean(dim=0, keepdim=True)

        # Output linear projection.
        output = self.Wout(attended_values.view(-1, hidden_dim))
        return output
#%%
class MultiHeadAttention(nn.Module):
    """Run `num_heads` independent TGATLayer instances on the same inputs
    and concatenate their outputs along the last (feature) dimension.
    """

    def __init__(self, input_dim, hidden_dim, num_heads, node_features, edge_dim):
        super(MultiHeadAttention, self).__init__()
        self.num_heads = num_heads
        self.head_dim = input_dim // num_heads
        heads = [TGATLayer(input_dim, edge_dim, hidden_dim, num_heads)
                 for _ in range(num_heads)]
        self.attention_layers = nn.ModuleList(heads)

    def forward(self, node_features, edge_features, timestamps_list, num_nodes):
        outputs = []
        for attention in self.attention_layers:
            outputs.append(attention(node_features, edge_features, timestamps_list, num_nodes))
        return torch.cat(outputs, dim=-1)

#%%
class PDTGAWithGraphAttention(nn.Module):
    """Graph-attention front-end plus a stack of TGAT layers and a final FFN.

    NOTE(review): this class leans on several module-level globals instead
    of its own parameters — confirm each before reuse:
      * `node_features`, `edge_features_dim` and `time_dim` are globals;
        the constructor parameters `edge_feature_dim` / `time_feature_dim`
        are partially ignored (`edge_features_dim` also looks like a typo
        for `edge_feature_dim`).
      * TGATLayer is instantiated with four positional args, so its
        `hidden_dim` receives `time_feature_dim` and its `num_heads`
        receives `hidden_dim` — this looks like an argument-order bug that
        the `in_features == 1` check in TGATLayer.__init__ compensates for.
    """
    def __init__(self, input_dim, edge_feature_dim, time_feature_dim, hidden_dim, num_layers, num_heads):
        super(PDTGAWithGraphAttention, self).__init__()
        self.graph_attention = MultiHeadAttention(input_dim, hidden_dim, num_heads, node_features, edge_features_dim)
        self.tgat_layers = nn.ModuleList([TGATLayer(input_dim, edge_feature_dim, time_feature_dim, hidden_dim) for _ in range(num_layers)])
        self.FFN = nn.Linear(hidden_dim * (input_dim + time_feature_dim + num_layers) + input_dim + time_dim, hidden_dim)

    def forward(self, node_features, edge_features, timestamps, num_nodes):
        # Graph attention over the raw node/edge features.
        graph_attention_output = self.graph_attention(node_features, edge_features, timestamps, num_nodes)
        hidden_representations = [node_features]
        for i, tgat_layer in enumerate(self.tgat_layers):
            hidden_representations.append(tgat_layer(hidden_representations[i], edge_features, timestamps, num_nodes))
        # Concatenate the TGAT outputs with the graph-attention output.
        concatenated_representations = torch.cat([graph_attention_output] + hidden_representations, dim=-1)
        concatenated_representations = torch.nan_to_num(concatenated_representations, nan=0)
        output = self.FFN(concatenated_representations)
        return output
#%%
# Example usage:
num_nodes = len(data)        # one row per node in the feature tables
node_features_dim = 14       # columns in `nodes`
edge_features_dim = 8        # columns in `edges`
time_dim = 1
hidden_dim = node_features_dim + edge_features_dim
num_heads = 4
output_dim = 1
num_layers = 1
#%%
# The data can then be passed to the model.
node_features = torch.tensor(nodes, dtype=torch.float32)
edge_features = torch.tensor(edges, dtype=torch.float32)
timestamps = [torch.tensor(ts, dtype=torch.float32) for ts in timestamp_nodes]
# Right-pad every per-node timestamp vector to a common length so they can
# be stacked inside TGATLayer.forward.
max_length = max(len(ts) for ts in timestamps)
padded_timestamps = [F.pad(ts, (0, max_length - len(ts))) for ts in timestamps]

gat_model = PDTGAWithGraphAttention(node_features_dim, edge_features_dim, time_dim, hidden_dim, num_layers, num_heads)
output = gat_model(node_features, edge_features, padded_timestamps, num_nodes)
#%%
output
#%%
# Squash the raw scores into (0, 1); displayed as node-level scores.
predictions = torch.sigmoid(output)
predictions
#%%
nodes_list = list(graph.nodes())

# PERF: the original re-filtered first_order_df twice per node
# (O(nodes x rows)).  Precomputing the sets of addresses that appear in at
# least one failed transaction makes the per-node lookup O(1) with
# identical labels.
error_rows = first_order_df[first_order_df['isError'] == 1]
error_senders = set(error_rows['From'])
error_receivers = set(error_rows['To'])

result_data = []
for node in tqdm(nodes_list):
    # A node is labelled 1 if it is on either side of any failed transaction.
    is_error_value = int(node in error_senders or node in error_receivers)
    result_data.append((node, is_error_value))

result_df = pd.DataFrame(result_data, columns=['Node', 'isError'])
result_df
#%%
# Import the required libraries.
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

# Node embeddings produced by the graph-attention model.
# NOTE(review): `output` still carries autograd history; detach() it if the
# downstream classifier should not be part of the computation graph.
X = output

# Convert the 'isError' column to a tensor.
y = torch.tensor(result_df['isError'].values, dtype=torch.float32)

# Split the data into training and test sets.
# NOTE(review): the sklearn MLPClassifier imported above is shadowed by the
# torch class of the same name defined in the next cell — confirm which
# classifier was intended.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# Define the MLP model (note: this shadows sklearn's MLPClassifier that the
# previous cell imported).
class MLPClassifier(nn.Module):
    """Two-layer perceptron with a ReLU hidden activation and a sigmoid
    output, used for binary classification of node embeddings.

    Submodule names (fc1/relu/fc2/sigmoid) are kept stable because the
    state_dict is written to disk later in the notebook.
    """

    def __init__(self, input_dim, hidden_dim, output_dim):
        super(MLPClassifier, self).__init__()
        self.fc1 = nn.Linear(input_dim, hidden_dim)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_dim, output_dim)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        hidden = self.relu(self.fc1(x))
        return self.sigmoid(self.fc2(hidden))
# Instantiate the model.
embedding_dim = X_train.shape[1]
hidden_dim = 64  # NOTE(review): rebinds the GAT `hidden_dim` global defined earlier
output_dim = 1
mlp_model = MLPClassifier(embedding_dim, hidden_dim, output_dim)

# Forward pass on the test set.
# NOTE(review): no training loop ever runs — the MLP is evaluated with
# freshly initialised weights, so this accuracy reflects a random model.
with torch.no_grad():
    test_outputs = mlp_model(X_test)
    predictions = (test_outputs.squeeze() > 0.5).float()
    accuracy = accuracy_score(y_test.numpy(), predictions.numpy())
    print(f'测试集准确率: {accuracy * 100:.2f}%')

#%%
from sklearn.metrics import precision_score, recall_score, f1_score

# Score the (untrained) classifier's test-set predictions.
y_true = y_test.numpy()
y_pred = predictions.numpy()
precision = precision_score(y_true, y_pred)
recall = recall_score(y_true, y_pred)
f1 = f1_score(y_true, y_pred)

# Report each score as a percentage with two decimals.
for label, score in (('精确率', precision), ('召回率', recall), ('F1分数', f1)):
    print(f'{label}: {score * 100:.2f}%')

#%% Save the models
import os

# Make sure the model directory exists.
os.makedirs('model', exist_ok=True)

# Save the PDTGA model.
torch.save(gat_model.state_dict(), 'model/pdtga_model.pth')
print("PDTGA模型已保存到 model/pdtga_model.pth")

# Save the MLP classifier.
torch.save(mlp_model.state_dict(), 'model/mlp_classifier.pth')
print("MLP分类器模型已保存到 model/mlp_classifier.pth")

# Persist the hyper-parameters needed to rebuild both models later.
model_info = {
    'node_features_dim': node_features_dim,
    'edge_features_dim': edge_features_dim,
    'time_dim': time_dim,
    # BUGFIX: the `hidden_dim` global was rebound to the MLP's 64 in an
    # earlier cell, so the GAT hidden size must be recomputed the way it
    # was originally defined (node_features_dim + edge_features_dim);
    # the old code silently saved 64 for both entries.
    'hidden_dim': node_features_dim + edge_features_dim,
    'num_heads': num_heads,
    'num_layers': num_layers,
    'embedding_dim': embedding_dim,
    'mlp_hidden_dim': hidden_dim,
    'output_dim': output_dim
}

import json
with open('model/model_info.json', 'w') as f:
    json.dump(model_info, f)
print("模型参数信息已保存到 model/model_info.json")

def plot_roc_curve(y_true, y_prob, model_name):
    """Plot and save the ROC curve(s) for one model.

    Draws one one-vs-rest curve per class for multi-class labels, or a
    single curve for binary labels, then saves the figure to
    `<visualization_dir>/<model_name>_roc_curve.png`.

    NOTE(review): relies on `plt`, `roc_curve`, `auc`, `label_binarize`
    and `visualization_dir`, none of which are defined or imported
    anywhere in this file — the matplotlib/sklearn imports are missing.
    """
    # Multi-class case: binarize labels and draw a curve per class.
    if len(np.unique(y_true)) > 2:
        y_bin = label_binarize(y_true, classes=np.unique(y_true))
        n_classes = y_bin.shape[1]
        
        plt.figure(figsize=(10, 8))
        for i in range(n_classes):
            fpr, tpr, _ = roc_curve(y_bin[:, i], y_prob[:, i])
            roc_auc = auc(fpr, tpr)
            plt.plot(fpr, tpr, lw=2, label=f'ROC曲线 (类别 {i}) (AUC = {roc_auc:.2f})')
    else:
        # Binary case.
        # NOTE(review): this branch assumes y_prob is 2-D; a 1-D array
        # would raise IndexError on .shape[1] — confirm callers.
        if y_prob.shape[1] > 1:  # probabilities for both classes available
            fpr, tpr, _ = roc_curve(y_true, y_prob[:, 1])
        else:  # only the positive-class probability available
            fpr, tpr, _ = roc_curve(y_true, y_prob)
        roc_auc = auc(fpr, tpr)
        plt.figure(figsize=(10, 8))
        plt.plot(fpr, tpr, lw=2, label=f'ROC曲线 (AUC = {roc_auc:.2f})')
    
    # Chance diagonal plus standard ROC axes/labels, then save and close.
    plt.plot([0, 1], [0, 1], 'k--', lw=2)
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('假阳性率')
    plt.ylabel('真阳性率')
    plt.title(f'ROC曲线 - {model_name}')
    plt.legend(loc="lower right")
    plt.savefig(os.path.join(visualization_dir, f'{model_name}_roc_curve.png'))
    plt.close()

def plot_precision_recall_curve(y_true, y_prob, model_name):
    """Plot and save the precision-recall curve(s) for one model.

    One one-vs-rest curve per class for multi-class labels, a single
    curve for binary labels; saved to
    `<visualization_dir>/<model_name>_pr_curve.png`.

    NOTE(review): relies on `plt`, `precision_recall_curve`, `auc`,
    `label_binarize` and `visualization_dir`, none of which are defined
    or imported anywhere in this file.
    """
    # Multi-class case: binarize labels and draw a curve per class.
    if len(np.unique(y_true)) > 2:
        y_bin = label_binarize(y_true, classes=np.unique(y_true))
        n_classes = y_bin.shape[1]
        
        plt.figure(figsize=(10, 8))
        for i in range(n_classes):
            precision, recall, _ = precision_recall_curve(y_bin[:, i], y_prob[:, i])
            pr_auc = auc(recall, precision)
            plt.plot(recall, precision, lw=2, label=f'PR曲线 (类别 {i}) (AUC = {pr_auc:.2f})')
    else:
        # Binary case.
        # NOTE(review): assumes y_prob is 2-D; 1-D input would raise on
        # .shape[1] — confirm callers.
        if y_prob.shape[1] > 1:  # probabilities for both classes available
            precision, recall, _ = precision_recall_curve(y_true, y_prob[:, 1])
        else:  # only the positive-class probability available
            precision, recall, _ = precision_recall_curve(y_true, y_prob)
        pr_auc = auc(recall, precision)
        plt.figure(figsize=(10, 8))
        plt.plot(recall, precision, lw=2, label=f'PR曲线 (AUC = {pr_auc:.2f})')
    
    # Standard PR axes/labels, then save and close the figure.
    plt.xlabel('召回率')
    plt.ylabel('精确率')
    plt.ylim([0.0, 1.05])
    plt.xlim([0.0, 1.0])
    plt.title(f'精确率-召回率曲线 - {model_name}')
    plt.legend(loc="lower left")
    plt.savefig(os.path.join(visualization_dir, f'{model_name}_pr_curve.png'))
    plt.close()

# Generate the model-comparison visualisation.
def plot_model_comparison(results):
    """Bar-chart every metric for every model in `results` and save the
    figure to `<visualization_dir>/model_comparison.png`.

    `results` maps model name -> dict containing 'accuracy', 'precision',
    'recall' and 'f1_score'.  Relies on module-level `plt`, `sns` and
    `visualization_dir`.
    """
    metrics = ['accuracy', 'precision', 'recall', 'f1_score']

    # Long-format rows: one (model, metric, value) record per bar.
    comparison_df = pd.DataFrame([
        {
            'Model': name,
            'Metric': metric.replace('_', ' ').title(),
            'Value': scores[metric],
        }
        for name, scores in results.items()
        for metric in metrics
    ])

    plt.figure(figsize=(12, 8))
    sns.barplot(x='Metric', y='Value', hue='Model', data=comparison_df)
    plt.title('模型性能比较')
    plt.ylim(0, 1)
    plt.grid(axis='y', linestyle='--', alpha=0.7)
    plt.savefig(os.path.join(visualization_dir, 'model_comparison.png'))
    plt.close()

# Generate visualisations for every model.
# NOTE(review): `results`, `y_test_np`, `plot_confusion_matrix` and
# `visualization_dir` are never defined in this file — this cell appears to
# be pasted from a multi-model version of the pipeline and raises NameError
# as-is.
print("\n生成可视化...")
for name, result in results.items():
    plot_confusion_matrix(y_test_np, result["predictions"], name)
    plot_roc_curve(y_test_np, result["probabilities"], name)
    plot_precision_recall_curve(y_test_np, result["probabilities"], name)

# Generate the model-comparison visualisation.
plot_model_comparison(results)

print(f"\n所有可视化已保存至 {visualization_dir}")

# Save all models.
# NOTE(review): `model_params_save_dir`, `rf_model`, `xgb_model`, `joblib`
# and `results` are never defined in this file — this tail looks pasted
# from a function in another version of the pipeline; confirm intent.
if not os.path.exists(model_params_save_dir):
    os.makedirs(model_params_save_dir)

# Save the PDTGA model.
# BUGFIX: the model defined in this notebook is `gat_model`;
# `pdtga_model` does not exist and raised NameError here.
torch.save(gat_model.state_dict(), os.path.join(model_params_save_dir, 'pdtga_model.pth'))

# Save the MLP model.
torch.save(mlp_model.state_dict(), os.path.join(model_params_save_dir, 'mlp_model.pth'))

# Save the sklearn models.
joblib.dump(rf_model, os.path.join(model_params_save_dir, 'random_forest_model.joblib'))
joblib.dump(xgb_model, os.path.join(model_params_save_dir, 'xgboost_model.joblib'))

# Pick the best model by accuracy and report it.
best_model_name = max(results, key=lambda k: results[k]['accuracy'])
best_model_accuracy = results[best_model_name]['accuracy']

print(f"\n最佳模型: {best_model_name}，准确率为 {best_model_accuracy:.4f}")
# BUGFIX: the original ended with a bare `return best_model_accuracy` at
# module level — a SyntaxError that prevented the entire file from parsing.
# The value is already reported by the print above and remains available as
# the module-level `best_model_accuracy`.
