#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
区块链交易欺诈检测模型训练脚本
"""

import numpy as np  # 线性代数
import pandas as pd  # 数据处理，CSV文件I/O
import os
import random
import networkx as nx
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
import gc
from gensim.models import Word2Vec
from sklearn.neural_network import MLPClassifier as SkMLPClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, confusion_matrix, precision_score, recall_score, f1_score
from sklearn.metrics import roc_curve, auc, precision_recall_curve
from sklearn.ensemble import RandomForestClassifier
import xgboost as xgb
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import label_binarize
import joblib
import json

# Fix all relevant RNGs (Python, NumPy, PyTorch) so runs are reproducible.
RANDOM_SEED = 42
random.seed(RANDOM_SEED)
np.random.seed(RANDOM_SEED)
torch.manual_seed(RANDOM_SEED)

def load_data():
    """Load the transaction CSV and filter out rarely/overly active addresses.

    Returns:
        tuple: (filtered_df, first_order_df) — filtered_df keeps only rows
        whose BOTH endpoints have between 5 and 500 total transactions;
        first_order_df is the full unfiltered table.
    """
    print("正在加载数据...")

    # Read the raw transaction table; drop the CSV's stale index column.
    # errors='ignore' keeps this from raising when the column is absent
    # (e.g. the file was re-exported without an index).
    first_order_df = pd.read_csv('data/first_order_df.csv')
    first_order_df.drop(columns=['Unnamed: 0'], errors='ignore', inplace=True)

    # Total transaction count per address: appearances as sender + receiver.
    from_counts = first_order_df['From'].value_counts()
    to_counts = first_order_df['To'].value_counts()
    combined_counts = from_counts.add(to_counts, fill_value=0)

    # Keep addresses with 5-500 transactions (drops one-off addresses and
    # exchange-scale hubs).
    filtered_counts = combined_counts.loc[(combined_counts >= 5) & (combined_counts <= 500)]

    # Keep only transactions whose both endpoints survive the count filter;
    # a set makes the isin membership test O(1) per row.
    values_to_keep = set(filtered_counts.index)
    filtered_df = first_order_df[first_order_df['From'].isin(values_to_keep) &
                                 first_order_df['To'].isin(values_to_keep)]
    filtered_df = filtered_df.reset_index(drop=True)

    print(f"数据加载完成，过滤后的交易数量: {len(filtered_df)}")
    return filtered_df, first_order_df

def build_graph(filtered_df):
    """Build a directed transaction graph with one From -> To edge per row.

    Args:
        filtered_df: DataFrame with 'From' and 'To' address columns.

    Returns:
        nx.DiGraph: graph whose nodes are addresses. Parallel transactions
        collapse into a single edge (DiGraph semantics, as before).
    """
    print("正在构建交易网络图...")
    graph = nx.DiGraph()

    # Bulk-add all edges at once: zipping the two columns avoids the slow
    # per-row .iloc indexing of the previous element-by-element loop.
    graph.add_edges_from(zip(filtered_df['From'], filtered_df['To']))

    print(f"图构建完成，节点数量: {len(graph.nodes())}, 边数量: {len(graph.edges())}")
    return graph

def get_random_walk(node, graph, path_length=10):
    """Perform one self-avoiding random walk of at most `path_length` nodes.

    Args:
        node: start node.
        graph: any object exposing `neighbors(node)`.
        path_length: maximum number of nodes in the walk (start included).

    Returns:
        list: visited nodes in order; shorter than `path_length` when every
        neighbour of the current node has already been visited.
    """
    random_walk = [node]
    visited = {node}  # O(1) membership instead of rescanning the walk list
    for _ in range(path_length - 1):
        # Keep the graph's neighbour order. The old code built candidates
        # with a set difference, whose iteration order depends on hash
        # randomization — random.choice was then NOT reproducible even with
        # a fixed random seed.
        candidates = [nbr for nbr in graph.neighbors(node) if nbr not in visited]
        if not candidates:
            break
        node = random.choice(candidates)
        random_walk.append(node)
        visited.add(node)
    return random_walk

def generate_random_walks(graph, walks_per_node=100, walk_length=10):
    """Generate `walks_per_node` random walks of length `walk_length` starting
    from every node of `graph`.

    Returns:
        list: all walks, grouped by start node in graph-node order.
    """
    print("正在生成随机游走数据...")
    random_walks = [
        get_random_walk(source, graph, walk_length)
        for source in tqdm(list(graph.nodes()))
        for _ in range(walks_per_node)
    ]
    print(f"随机游走生成完成，总游走数量: {len(random_walks)}")
    return random_walks

def extract_node_features(graph, filtered_df):
    """Compute per-node structural and transactional features.

    Args:
        graph: directed transaction graph whose nodes are addresses.
        filtered_df: transactions with 'From', 'To', 'Value', 'TimeStamp';
            every graph node appears in at least one row.

    Returns:
        tuple: (data, timestamp_nodes) — data is a list of feature dicts in
        graph-node order; timestamp_nodes is the matching list of raw
        timestamp lists (outgoing transactions first, then incoming).
    """
    print("正在提取节点特征...")
    # Degree views cover all nodes.
    degrees = graph.degree()
    in_degrees = graph.in_degree()
    out_degrees = graph.out_degree()

    # Group the transaction rows by sender and by receiver ONCE. The previous
    # implementation re-filtered the whole DataFrame ~10 times per node,
    # making this loop O(nodes * transactions). groupby preserves the
    # original row order inside each group, so sums/diffs are unchanged.
    empty_rows = filtered_df.iloc[0:0]
    rows_by_from = {key: grp for key, grp in filtered_df.groupby('From')}
    rows_by_to = {key: grp for key, grp in filtered_df.groupby('To')}

    data = []
    timestamp_nodes = []

    for node in tqdm(list(graph.nodes())):
        degree = degrees[node]
        in_degree = in_degrees[node]
        out_degree = out_degrees[node]
        from_rows = rows_by_from.get(node, empty_rows)
        to_rows = rows_by_to.get(node, empty_rows)

        # Ratios fall back to inf on a zero denominator (original behaviour;
        # downstream code relies on nan_to_num guards).
        in_degree_ratio = in_degree / out_degree if out_degree != 0 else float('inf')
        out_degree_ratio = out_degree / in_degree if in_degree != 0 else float('inf')
        transfer_out = from_rows['Value'].sum()
        transfer_in = to_rows['Value'].sum()
        transaction = transfer_out + transfer_in
        transaction_diff = transfer_in - transfer_out
        transaction_ratio = transfer_in / transfer_out if transfer_out != 0 else float('inf')
        transfer_in_ratio = transfer_in / in_degree if in_degree != 0 else float('inf')
        transfer_out_ratio = transfer_out / out_degree if out_degree != 0 else float('inf')
        neighbours = len(set(graph.neighbors(node)))

        # Mean gap between consecutive transactions (outgoing gaps + incoming
        # gaps, divided by the node's total transaction count).
        timestamp_diff = (from_rows['TimeStamp'].diff().sum() +
                          to_rows['TimeStamp'].diff().sum())
        avg_timestamp_diff = timestamp_diff / (len(from_rows) + len(to_rows))
        inv_timestamp_freq = 1 / avg_timestamp_diff if avg_timestamp_diff != 0 else 0

        data.append({
            "Node": node,
            "Total Degree": degree,
            "Out-Degree": out_degree,
            "In-Degree": in_degree,
            "Out-Degree Ratio": out_degree_ratio,
            "In-Degree Ratio": in_degree_ratio,
            "Sum of Transactions": transaction,
            "Transfer-Out Transaction": transfer_out,
            "Transfer-In Transaction": transfer_in,
            "Transaction Difference": transaction_diff,
            "Transaction_Ratio": transaction_ratio,
            "Transfer-In Ratio": transfer_in_ratio,
            "Transfer-Out Ratio": transfer_out_ratio,
            "Number of Neighbours": neighbours,
            "Inverse Timestamp Frequency": inv_timestamp_freq
        })
        # Raw timestamps for this node — outgoing first, then incoming, the
        # same order the original second pass produced.
        timestamp_nodes.append(list(from_rows['TimeStamp']) + list(to_rows['TimeStamp']))

    print("节点特征提取完成")
    return data, timestamp_nodes

def prepare_model_inputs(data, timestamp_nodes):
    """Turn per-node feature dicts and timestamp lists into model tensors.

    Args:
        data: list of per-node feature dicts (see extract_node_features).
        timestamp_nodes: list of per-node timestamp lists, same order.

    Returns:
        tuple: (node_features, edge_features, padded_timestamps, num_nodes).
    """
    print("正在准备模型输入数据...")

    # Feature keys, in the exact column order the models expect.
    node_keys = [
        'Total Degree', 'Out-Degree', 'In-Degree', 'Out-Degree Ratio',
        'In-Degree Ratio', 'Sum of Transactions', 'Transfer-Out Transaction',
        'Transfer-In Transaction', 'Transaction Difference', 'Transaction_Ratio',
        'Transfer-In Ratio', 'Transfer-Out Ratio', 'Number of Neighbours',
        'Inverse Timestamp Frequency',
    ]
    edge_keys = [
        'Out-Degree', 'In-Degree', 'Out-Degree Ratio', 'In-Degree Ratio',
        'Transfer-Out Transaction', 'Transfer-In Transaction',
        'Transfer-In Ratio', 'Transfer-Out Ratio',
    ]

    node_features = torch.tensor(
        [[row[key] for key in node_keys] for row in data], dtype=torch.float32)
    edge_features = torch.tensor(
        [[row[key] for key in edge_keys] for row in data], dtype=torch.float32)

    # Right-pad every timestamp sequence with zeros to the longest length.
    timestamps = [torch.tensor(ts, dtype=torch.float32) for ts in timestamp_nodes]
    max_length = max(len(ts) for ts in timestamps) if timestamps else 1
    padded_timestamps = [F.pad(ts, (0, max_length - len(ts))) for ts in timestamps]

    print("模型输入数据准备完成")
    return node_features, edge_features, padded_timestamps, len(data)

class TGATLayer(nn.Module):
    """Temporal graph attention layer.

    Projects concatenated node+edge features into Q/K/V, applies scaled
    dot-product attention across the per-node head dimension, mean-pools the
    heads, and maps the result through an output projection.
    """

    def __init__(self, input_dim, edge_dim, hidden_dim, num_heads):
        super(TGATLayer, self).__init__()
        self.num_heads = num_heads
        self.hidden_dim = hidden_dim
        self.input_dim = input_dim
        self.edge_dim = edge_dim

        # Query / key / value projections (all heads produced at once).
        self.Wq = nn.Linear(input_dim + edge_dim, hidden_dim * num_heads)
        self.Wk = nn.Linear(input_dim + edge_dim, hidden_dim * num_heads)
        self.Wv = nn.Linear(input_dim + edge_dim, hidden_dim * num_heads)

        # Output projection applied after head pooling.
        self.Wout = nn.Linear(hidden_dim, hidden_dim)

    def forward(self, node_features, edge_features, timestamps_list, num_nodes):
        """Return attended representations of shape (batch, hidden_dim).

        `timestamps_list` and `num_nodes` are accepted for interface
        compatibility with callers but do not enter the computation.
        (The previous version stacked timestamps_list into a tensor that was
        never used — dead work that could also crash on ragged inputs; the
        stack has been removed.)
        """
        # One token per node: node features concatenated with edge features.
        Z = torch.cat([node_features, edge_features], dim=1)
        batch_size = Z.size(0)

        # Project and split the flat head axis: (batch, num_heads, hidden_dim).
        Q = self.Wq(Z).view(batch_size, -1, self.hidden_dim)
        K = self.Wk(Z).view(batch_size, -1, self.hidden_dim)
        V = self.Wv(Z).view(batch_size, -1, self.hidden_dim)

        # Scaled dot-product attention over the head dimension.
        attention_scores = torch.matmul(Q, K.transpose(-2, -1)) / torch.sqrt(torch.tensor(self.hidden_dim, dtype=torch.float32))
        attention_weights = F.softmax(attention_scores, dim=-1)
        # Degenerate inputs (e.g. inf-valued features) can yield NaNs; zero
        # them so they cannot poison the matmul below.
        attention_weights = torch.nan_to_num(attention_weights, nan=0)
        V = torch.nan_to_num(V, nan=0)
        attended_values = torch.matmul(attention_weights, V)

        # Mean-pool the heads down to (batch, hidden_dim).
        if attended_values.dim() > 2:
            attended_values = attended_values.mean(dim=1)

        return self.Wout(attended_values)

class MultiHeadAttention(nn.Module):
    """Runs several TGATLayer heads in parallel and concatenates their outputs."""

    def __init__(self, input_dim, hidden_dim, num_heads, node_features_dim, edge_dim):
        super(MultiHeadAttention, self).__init__()
        self.num_heads = num_heads
        self.head_dim = input_dim // num_heads
        # One independent TGAT attention layer per head.
        layers = [TGATLayer(input_dim, edge_dim, hidden_dim, num_heads)
                  for _ in range(num_heads)]
        self.attention_layers = nn.ModuleList(layers)

    def forward(self, node_features, edge_features, timestamps_list, num_nodes):
        # Run every head on the same inputs, then join along the feature axis.
        outputs = []
        for layer in self.attention_layers:
            outputs.append(layer(node_features, edge_features, timestamps_list, num_nodes))
        return torch.cat(outputs, dim=-1)

class GATLayer(nn.Module):
    """Single dense graph-attention layer (GAT-style).

    Every node attends to every other node in the batch; attention logits
    come from scoring concatenated (source, target) pairs of transformed
    node features.
    """

    def __init__(self, input_dim, output_dim, dropout=0.6, alpha=0.2):
        super(GATLayer, self).__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.dropout = dropout
        self.alpha = alpha

        # Shared linear transform applied to every node.
        self.W = nn.Linear(input_dim, output_dim, bias=False)

        # Scores one concatenated (source, target) feature pair.
        self.a = nn.Linear(2 * output_dim, 1, bias=False)

        # LeakyReLU for the raw attention logits.
        self.leakyrelu = nn.LeakyReLU(self.alpha)

        # Dropout applied to the normalised attention weights.
        self.dropout_layer = nn.Dropout(self.dropout)

    def forward(self, node_features, edge_features=None):
        # Transform node features: (n, output_dim).
        h = self.W(node_features)
        n = h.size(0)

        # All (source, target) pairs via broadcasting:
        # pair_features[i, j] == concat(h[i], h[j]), shape (n, n, 2*output_dim).
        src = h.unsqueeze(1).expand(n, n, self.output_dim)
        dst = h.unsqueeze(0).expand(n, n, self.output_dim)
        pair_features = torch.cat([src, dst], dim=2)

        # Raw logits, then row-wise softmax normalisation with dropout.
        logits = self.leakyrelu(self.a(pair_features).squeeze(2))
        attention = self.dropout_layer(F.softmax(logits, dim=1))

        # Weighted sum of the transformed neighbour features.
        return torch.matmul(attention, h)

class GATModel(nn.Module):
    """Two-stage graph attention network: multi-head GAT layer + GAT output layer."""

    def __init__(self, input_dim, hidden_dim, output_dim, num_heads=4, dropout=0.6, alpha=0.2):
        super(GATModel, self).__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.num_heads = num_heads

        # Independent attention heads whose outputs are concatenated.
        heads = [GATLayer(input_dim, hidden_dim, dropout=dropout, alpha=alpha)
                 for _ in range(num_heads)]
        self.attentions = nn.ModuleList(heads)

        # Final attention layer mapping the concatenated heads to output size.
        self.out_layer = GATLayer(hidden_dim * num_heads, output_dim, dropout=dropout, alpha=alpha)

        # Non-linearity between the two stages.
        self.elu = nn.ELU()

    def forward(self, node_features, edge_features=None, timestamps=None, num_nodes=None):
        # edge_features / timestamps / num_nodes are accepted only so this
        # model shares a call signature with the other models in this file.
        head_outputs = [head(node_features) for head in self.attentions]
        hidden = self.elu(torch.cat(head_outputs, dim=1))
        return self.out_layer(hidden)

class PDTGAWithGraphAttention(nn.Module):
    """Propagation-detection temporal graph attention (PDTGA) model.

    Runs a multi-head graph-attention block plus a stack of TGAT layers over
    the node/edge features, concatenates the raw node features with every
    layer's output, and projects the result through one linear layer.
    """
    def __init__(self, input_dim, edge_feature_dim, time_feature_dim, hidden_dim, num_layers, num_heads):
        super(PDTGAWithGraphAttention, self).__init__()
        self.input_dim = input_dim
        self.edge_feature_dim = edge_feature_dim
        self.time_feature_dim = time_feature_dim
        self.hidden_dim = hidden_dim
        self.num_layers = num_layers
        self.num_heads = num_heads
        
        # Multi-head graph attention over node + edge features.
        self.graph_attention = MultiHeadAttention(
            input_dim, hidden_dim, num_heads, input_dim, edge_feature_dim
        )
        
        # Stack of temporal graph attention layers.
        # NOTE(review): TGATLayer's signature is (input_dim, edge_dim,
        # hidden_dim, num_heads); here time_feature_dim lands in hidden_dim
        # and hidden_dim in num_heads. This looks like an argument-order
        # slip — confirm before changing, since the hard-coded FFN width
        # below was tuned to the dimensions this call actually produces.
        self.tgat_layers = nn.ModuleList([
            TGATLayer(input_dim, edge_feature_dim, time_feature_dim, hidden_dim) 
            for _ in range(num_layers)
        ])
        
        # FFN input dimension.
        # HACK: 103 is hard-coded to match the empirically observed width of
        # the concatenation in forward(); it will break if any dimension or
        # layer count above changes.
        ffn_input_dim = 103
        
        # Final feed-forward projection down to hidden_dim.
        self.FFN = nn.Linear(ffn_input_dim, hidden_dim)
        print(f"FFN 输入维度: {ffn_input_dim}, 输出维度: {hidden_dim}")

    def forward(self, node_features, edge_features, timestamps, num_nodes):
        """Return one embedding row per node, shape (num_nodes, hidden_dim)."""
        # Multi-head graph attention over the full node set.
        graph_attention_output = self.graph_attention(node_features, edge_features, timestamps, num_nodes)
        print(f"图注意力输出形状: {graph_attention_output.shape}")
        
        # The raw node features serve as the first "hidden" representation.
        hidden_representations = [node_features]
        
        # Each TGAT layer consumes the previous layer's output.
        for i, tgat_layer in enumerate(self.tgat_layers):
            layer_output = tgat_layer(hidden_representations[i], edge_features, timestamps, num_nodes)
            print(f"TGAT层 {i} 输出形状: {layer_output.shape}")
            hidden_representations.append(layer_output)
        
        # Concatenate the attention output with every intermediate representation.
        concatenated_representations = torch.cat([graph_attention_output] + hidden_representations, dim=1)
        print(f"拼接后的表示形状: {concatenated_representations.shape}")
        
        # Zero out NaNs (inf-valued input features propagate NaNs through softmax).
        concatenated_representations = torch.nan_to_num(concatenated_representations, nan=0)
        
        # Final feed-forward projection.
        output = self.FFN(concatenated_representations)
        print(f"最终输出形状: {output.shape}")
        
        return output

class MLPClassifier(nn.Module):
    """Two-layer perceptron with a sigmoid head for binary classification."""

    def __init__(self, input_dim, hidden_dim, output_dim):
        super(MLPClassifier, self).__init__()
        self.fc1 = nn.Linear(input_dim, hidden_dim)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_dim, output_dim)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # hidden layer -> ReLU -> output layer -> sigmoid probability
        hidden = self.relu(self.fc1(x))
        return self.sigmoid(self.fc2(hidden))

def create_target_variables(graph, first_order_df):
    """Build the binary target: 1 if a node ever touched an isError==1 transaction.

    Args:
        graph: transaction graph; its node order defines the output row order.
        first_order_df: full transaction table with 'From', 'To', 'isError'.

    Returns:
        pd.DataFrame: columns ['Node', 'isError'], one row per graph node.
    """
    print("正在创建目标变量...")

    # Collect every address that appears on either side of a failed
    # transaction ONCE, instead of re-filtering the whole DataFrame twice
    # per node (the previous approach was O(nodes * transactions)).
    error_rows = first_order_df[first_order_df['isError'] == 1]
    error_nodes = set(error_rows['From']) | set(error_rows['To'])

    result_data = [(node, int(node in error_nodes)) for node in graph.nodes()]

    result_df = pd.DataFrame(result_data, columns=['Node', 'isError'])
    print(f"目标变量创建完成，错误节点数量: {result_df['isError'].sum()}")
    return result_df

def train_models(node_features, edge_features, padded_timestamps, num_nodes, result_df):
    """Train several models on graph embeddings and evaluate them.

    Generates node embeddings with the PDTGA and GAT models, trains
    RandomForest / XGBoost / a small MLP on the PDTGA embeddings, evaluates
    every model on a 20% hold-out split, writes visualizations, saves all
    models and a JSON performance summary, and returns the per-model results.

    Args:
        node_features: float tensor, one row of features per node.
        edge_features: float tensor of edge-derived features per node.
        padded_timestamps: list of equal-length 1-D timestamp tensors.
        num_nodes: number of nodes.
        result_df: DataFrame with an 'isError' column aligned to node order.

    Returns:
        dict: per-model metrics, predictions, probabilities and confusion matrices.
    """
    print("开始模型训练和评估...")
    
    # Model hyper-parameters derived from the input feature widths.
    node_features_dim = node_features.shape[1]
    edge_features_dim = edge_features.shape[1]
    time_dim = 1
    hidden_dim = node_features_dim + edge_features_dim
    num_heads = 4
    num_layers = 1
    
    # Build the PDTGA model.
    # NOTE(review): neither PDTGA nor GAT is trained anywhere in this
    # function — their forward passes below run with freshly initialized
    # weights, so the embeddings are random projections. Confirm intended.
    pdtga_model = PDTGAWithGraphAttention(
        node_features_dim, edge_features_dim, time_dim, hidden_dim, num_layers, num_heads
    )
    
    # Build the GAT model.
    gat_model = GATModel(
        node_features_dim, hidden_dim, hidden_dim, num_heads=num_heads
    )
    
    # Generate PDTGA embeddings (used as features for the classifiers below).
    print("正在生成PDTGA嵌入...")
    with torch.no_grad():
        output = pdtga_model(node_features, edge_features, padded_timestamps, num_nodes)
        
    # Generate GAT embeddings.
    print("正在生成GAT嵌入...")
    with torch.no_grad():
        gat_output = gat_model(node_features, edge_features, padded_timestamps, num_nodes)
    
    # Convert the target labels to a tensor (row-aligned with the embeddings).
    y = torch.tensor(result_df['isError'].values, dtype=torch.float32)
    
    # Hold out 20% of the nodes for evaluation.
    X_train, X_test, y_train, y_test = train_test_split(
        output, y, test_size=0.2, random_state=RANDOM_SEED
    )
    
    # NumPy copies for the sklearn-style models.
    X_train_np = X_train.numpy()
    X_test_np = X_test.numpy()
    y_train_np = y_train.numpy()
    y_test_np = y_test.numpy()
    
    # Directory for the serialized models.
    model_dir = 'model'
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)
    
    # Directory for the evaluation plots.
    vis_dir = os.path.join(model_dir, 'visualizations')
    if not os.path.exists(vis_dir):
        os.makedirs(vis_dir)
    
    # Train the random forest.
    print("训练随机森林模型...")
    rf_model = RandomForestClassifier(n_estimators=100, random_state=RANDOM_SEED)
    rf_model.fit(X_train_np, y_train_np)
    
    # Train XGBoost.
    print("训练XGBoost模型...")
    xgb_model = xgb.XGBClassifier(n_estimators=100, random_state=RANDOM_SEED)
    xgb_model.fit(X_train_np, y_train_np)
    
    # Build and train the MLP classifier on the PDTGA embeddings.
    print("训练MLP模型...")
    embedding_dim = X_train.shape[1]
    hidden_dim_mlp = 64
    output_dim = 1
    mlp_model = MLPClassifier(embedding_dim, hidden_dim_mlp, output_dim)
    
    # Binary cross-entropy loss with Adam.
    criterion = nn.BCELoss()
    optimizer = torch.optim.Adam(mlp_model.parameters(), lr=0.001)
    
    # Full-batch training loop for the MLP.
    epochs = 50
    for epoch in range(epochs):
        # Forward pass.
        outputs = mlp_model(X_train)
        loss = criterion(outputs.squeeze(), y_train)
        
        # Backward pass and parameter update.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        
        if (epoch + 1) % 10 == 0:
            print(f'轮次 [{epoch+1}/{epochs}], 损失: {loss.item():.4f}')
    
    # Evaluate every model on the hold-out split.
    print("\n模型评估结果:")
    models = {
        "PDTGA": {"model": pdtga_model, "is_torch": True, "output": output},
        "GAT": {"model": gat_model, "is_torch": True, "output": gat_output},
        "RandomForest": {"model": rf_model, "is_torch": False},
        "XGBoost": {"model": xgb_model, "is_torch": False},
        "MLP": {"model": mlp_model, "is_torch": True}
    }
    
    results = {}
    
    for name, model_info in models.items():
        model = model_info["model"]
        is_torch = model_info["is_torch"]
        
        if is_torch:
            model.eval()
            with torch.no_grad():
                if name in ["PDTGA", "GAT"]:
                    # Recover the test-set row indices. This relies on
                    # train_test_split producing the identical partition as
                    # the earlier call because the same random_state is used.
                    test_indices = torch.arange(len(y))
                    _, test_indices = train_test_split(
                        test_indices, test_size=0.2, random_state=RANDOM_SEED
                    )
                    
                    # Reuse the embeddings generated above rather than
                    # re-running the model.
                    test_output = model_info["output"][test_indices]
                    
                    # Ensure both graph models yield a binary prediction.
                    # PDTGA's output is a raw embedding, so it is reduced to
                    # one score per node first.
                    if name == "PDTGA":
                        # Threshold the mean of each embedding row at 0.5 to
                        # obtain a 0/1 prediction per node.
                        test_output_binary = torch.zeros(test_output.size(0), 1)
                        for i in range(test_output.size(0)):
                            # Mean over the row > 0.5 -> class 1, else 0.
                            test_output_binary[i] = 1.0 if torch.mean(test_output[i]) > 0.5 else 0.0
                        test_output = test_output_binary
                else:
                    test_output = model(X_test)
                
                if test_output.shape[1] > 1:  # multi-column output -> argmax path
                    # NOTE(review): GAT's raw hidden_dim-wide embedding goes
                    # through this branch and is treated as multi-class
                    # logits — its metrics are therefore not meaningful as a
                    # binary classifier; confirm intended.
                    _, pred = torch.max(test_output, 1)
                    pred = pred.numpy()
                    prob = F.softmax(test_output, dim=1).numpy()
                else:  # single-column output -> binary threshold at 0.5
                    pred = (test_output.squeeze() > 0.5).float().numpy()
                    # Build a two-column probability array [P(0), P(1)].
                    prob_1 = test_output.squeeze().numpy()
                    # Replace NaNs with a neutral 0.5 probability.
                    prob_1 = np.nan_to_num(prob_1, nan=0.5)
                    prob_0 = 1 - prob_1
                    prob = np.column_stack((prob_0, prob_1))
        else:
            pred = model.predict(X_test_np)
            prob = model.predict_proba(X_test_np)
        
        # Guard: predictions must align with the test labels.
        if len(pred) != len(y_test_np):
            print(f"警告: {name} 模型预测结果大小 ({len(pred)}) 与测试集大小 ({len(y_test_np)}) 不一致!")
            continue
        
        # Core metrics (weighted averages; zero_division guards empty classes).
        accuracy = accuracy_score(y_test_np, pred)
        precision = precision_score(y_test_np, pred, average='weighted', zero_division=0)
        recall = recall_score(y_test_np, pred, average='weighted', zero_division=0)
        f1 = f1_score(y_test_np, pred, average='weighted', zero_division=0)
        
        # Confusion matrix.
        cm = confusion_matrix(y_test_np, pred)
        
        # For a 2x2 matrix also derive specificity / FPR / FNR.
        if cm.shape == (2, 2):
            tn, fp, fn, tp = cm.ravel()
            specificity = tn / (tn + fp) if (tn + fp) > 0 else 0
            fpr = fp / (fp + tn) if (fp + tn) > 0 else 0
            fnr = fn / (fn + tp) if (fn + tp) > 0 else 0
        else:
            print(f"警告: {name} 模型生成的混淆矩阵维度为 {cm.shape}，不是期望的2x2")
            # Non-binary predictions (e.g. GAT's argmax path) land here.
            specificity = "N/A"
            fpr = "N/A"
            fnr = "N/A"
        
        results[name] = {
            "accuracy": accuracy,
            "precision": precision,
            "recall": recall,
            "f1_score": f1,
            "specificity": specificity,
            "false_positive_rate": fpr,
            "false_negative_rate": fnr,
            "predictions": pred,
            "probabilities": prob,
            "confusion_matrix": cm
        }
        
        # Print a per-model summary.
        print(f"\n{name} 模型性能:")
        print(f"准确率: {accuracy:.4f}")
        print(f"精确率: {precision:.4f}")
        print(f"召回率: {recall:.4f}")
        print(f"F1分数: {f1:.4f}")
        print(f"混淆矩阵形状: {cm.shape}")
        
        if specificity != "N/A":
            print(f"特异性: {specificity:.4f}")
            print(f"假阳性率: {fpr:.4f}")
            print(f"假阴性率: {fnr:.4f}")
    
    # Render the evaluation plots.
    visualize_results(results, y_test_np, vis_dir)
    
    # Persist all models plus their hyper-parameters.
    save_models(pdtga_model, gat_model, mlp_model, rf_model, xgb_model, model_dir, 
                node_features_dim, edge_features_dim, time_dim, hidden_dim, 
                num_heads, num_layers, embedding_dim, hidden_dim_mlp, output_dim)
    
    # Export the headline metrics as JSON for the front-end.
    model_performance = {}
    for name, result in results.items():
        model_performance[name] = {
            "accuracy": float(result["accuracy"]),
            "precision": float(result["precision"]),
            "recall": float(result["recall"]),
            "f1_score": float(result["f1_score"])
        }
    
    # Write the performance summary.
    with open(os.path.join(model_dir, 'model_performance.json'), 'w') as f:
        json.dump(model_performance, f, indent=4)
    
    print(f"模型性能数据已保存到 {os.path.join(model_dir, 'model_performance.json')}")
    
    # Report the most accurate model.
    best_model_name = max(results, key=lambda k: results[k]['accuracy'])
    best_model_accuracy = results[best_model_name]['accuracy']
    print(f"\n最佳模型: {best_model_name}，准确率为 {best_model_accuracy:.4f}")
    
    return results

def visualize_results(results, y_test, vis_dir):
    """Render and save all evaluation plots for the trained models.

    For every model: a confusion-matrix heatmap, a ROC curve and a
    precision-recall curve; plus one grouped bar chart comparing all models.
    All PNGs are written into `vis_dir`. Labels fall back to English when no
    Chinese font is installed.

    Args:
        results: per-model results dict as produced by train_models.
        y_test: ground-truth labels of the test split.
        vis_dir: output directory for the PNG files.
    """
    print("正在生成可视化结果...")
    
    # Configure matplotlib for Chinese text.
    import matplotlib as mpl
    import matplotlib.font_manager as fm
    
    # Candidate CJK fonts, tried in order of preference.
    font_list = ['SimHei', 'Microsoft YaHei', 'SimSun', 'Arial Unicode MS', 'STSong', 'STFangsong']
    
    # Find the first candidate installed on this system.
    chinese_font = None
    for font_name in font_list:
        # Scan the system font files for a matching file name.
        font_path = None
        for f in fm.findSystemFonts():
            if font_name.lower() in os.path.basename(f).lower():
                font_path = f
                break
        
        if font_path:
            chinese_font = fm.FontProperties(fname=font_path)
            print(f"使用中文字体: {font_name}")
            break
    
    # Without a Chinese font, use English labels everywhere.
    use_english = chinese_font is None
    if use_english:
        print("未找到中文字体，将使用英文标签")
    
    # Confusion-matrix heatmap for one model.
    def plot_confusion_matrix(y_true, y_pred, model_name):
        cm = confusion_matrix(y_true, y_pred)
        plt.figure(figsize=(10, 8))
        sns.heatmap(cm, annot=True, fmt='d', cmap='Blues')
        
        if use_english:
            plt.title(f'Confusion Matrix - {model_name}')
            plt.ylabel('True Label')
            plt.xlabel('Predicted Label')
        else:
            plt.title(f'混淆矩阵 - {model_name}', fontproperties=chinese_font)
            plt.ylabel('真实标签', fontproperties=chinese_font)
            plt.xlabel('预测标签', fontproperties=chinese_font)
            
        plt.savefig(os.path.join(vis_dir, f'{model_name}_confusion_matrix.png'))
        plt.close()
    
    # ROC curve for one model.
    def plot_roc_curve(y_true, y_prob, model_name):
        # Sanitize NaNs before computing the curve.
        if y_prob.shape[1] > 1:  # two-column array: use the positive class
            y_prob_clean = np.nan_to_num(y_prob[:, 1], nan=0.5)
            fpr, tpr, _ = roc_curve(y_true, y_prob_clean)
        else:  # single column: already the positive-class probability
            y_prob_clean = np.nan_to_num(y_prob, nan=0.5)
            fpr, tpr, _ = roc_curve(y_true, y_prob_clean)
        roc_auc = auc(fpr, tpr)
        
        plt.figure(figsize=(10, 8))
        plt.plot(fpr, tpr, lw=2, label=f'ROC curve (AUC = {roc_auc:.2f})' if use_english else f'ROC曲线 (AUC = {roc_auc:.2f})')
        plt.plot([0, 1], [0, 1], 'k--', lw=2)
        plt.xlim([0.0, 1.0])
        plt.ylim([0.0, 1.05])
        
        if use_english:
            plt.xlabel('False Positive Rate')
            plt.ylabel('True Positive Rate')
            plt.title(f'ROC Curve - {model_name}')
        else:
            plt.xlabel('假阳性率', fontproperties=chinese_font)
            plt.ylabel('真阳性率', fontproperties=chinese_font)
            plt.title(f'ROC曲线 - {model_name}', fontproperties=chinese_font)
            
        plt.legend(loc="lower right", prop=chinese_font if not use_english else None)
        plt.savefig(os.path.join(vis_dir, f'{model_name}_roc_curve.png'))
        plt.close()
    
    # Precision-recall curve for one model.
    def plot_precision_recall_curve(y_true, y_prob, model_name):
        # Sanitize NaNs before computing the curve.
        if y_prob.shape[1] > 1:  # two-column array: use the positive class
            y_prob_clean = np.nan_to_num(y_prob[:, 1], nan=0.5)
            precision, recall, _ = precision_recall_curve(y_true, y_prob_clean)
        else:  # single column: already the positive-class probability
            y_prob_clean = np.nan_to_num(y_prob, nan=0.5)
            precision, recall, _ = precision_recall_curve(y_true, y_prob_clean)
        pr_auc = auc(recall, precision)
        
        plt.figure(figsize=(10, 8))
        plt.plot(recall, precision, lw=2, label=f'PR curve (AUC = {pr_auc:.2f})' if use_english else f'PR曲线 (AUC = {pr_auc:.2f})')
        
        if use_english:
            plt.xlabel('Recall')
            plt.ylabel('Precision')
            plt.title(f'Precision-Recall Curve - {model_name}')
        else:
            plt.xlabel('召回率', fontproperties=chinese_font)
            plt.ylabel('精确率', fontproperties=chinese_font)
            plt.title(f'精确率-召回率曲线 - {model_name}', fontproperties=chinese_font)
            
        plt.ylim([0.0, 1.05])
        plt.xlim([0.0, 1.0])
        plt.legend(loc="lower left", prop=chinese_font if not use_english else None)
        plt.savefig(os.path.join(vis_dir, f'{model_name}_pr_curve.png'))
        plt.close()
    
    # Grouped bar chart comparing all models across the headline metrics.
    def plot_model_comparison(results):
        metrics = ['accuracy', 'precision', 'recall', 'f1_score']
        model_names = list(results.keys())
        
        # Long-form DataFrame (one row per model/metric) for seaborn.
        comparison_data = []
        for name in model_names:
            for metric in metrics:
                comparison_data.append({
                    'Model': name,
                    'Metric': metric.replace('_', ' ').title(),
                    'Value': results[name][metric]
                })
        
        comparison_df = pd.DataFrame(comparison_data)
        
        # Draw the chart.
        plt.figure(figsize=(12, 8))
        sns.barplot(x='Metric', y='Value', hue='Model', data=comparison_df)
        
        if use_english:
            plt.title('Model Performance Comparison')
        else:
            plt.title('模型性能比较', fontproperties=chinese_font)
            
        plt.ylim(0, 1)
        plt.grid(axis='y', linestyle='--', alpha=0.7)
        plt.savefig(os.path.join(vis_dir, 'model_comparison.png'))
        plt.close()
    
    # Per-model plots.
    for name, result in results.items():
        plot_confusion_matrix(y_test, result["predictions"], name)
        plot_roc_curve(y_test, result["probabilities"], name)
        plot_precision_recall_curve(y_test, result["probabilities"], name)
    
    # Cross-model comparison chart.
    plot_model_comparison(results)
    print(f"可视化结果已保存到 {vis_dir} 目录")

def save_models(pdtga_model, gat_model, mlp_model, rf_model, xgb_model, model_dir,
               node_features_dim, edge_features_dim, time_dim, hidden_dim,
               num_heads, num_layers, embedding_dim, hidden_dim_mlp, output_dim):
    """Persist every trained model plus a JSON file of their hyper-parameters."""
    print("正在保存模型...")

    # PyTorch models: persist the state dicts.
    torch_models = {
        'pdtga_model.pth': pdtga_model,
        'gat_model.pth': gat_model,
        'mlp_classifier.pth': mlp_model,
    }
    for filename, torch_model in torch_models.items():
        torch.save(torch_model.state_dict(), os.path.join(model_dir, filename))

    # sklearn-style models: persist via joblib.
    joblib.dump(rf_model, os.path.join(model_dir, 'random_forest_model.joblib'))
    joblib.dump(xgb_model, os.path.join(model_dir, 'xgboost_model.joblib'))

    # Record the dimensions needed to rebuild the torch models at load time.
    model_info = {
        'node_features_dim': node_features_dim,
        'edge_features_dim': edge_features_dim,
        'time_dim': time_dim,
        'hidden_dim': hidden_dim,
        'num_heads': num_heads,
        'num_layers': num_layers,
        'embedding_dim': embedding_dim,
        'mlp_hidden_dim': hidden_dim_mlp,
        'output_dim': output_dim
    }
    with open(os.path.join(model_dir, 'model_info.json'), 'w') as f:
        json.dump(model_info, f)

    print(f"所有模型已保存到 {model_dir} 目录")

def main():
    """Entry point: run the full data -> graph -> features -> training pipeline."""
    print("区块链交易欺诈检测模型训练开始...")

    # 1. Load and filter the raw transactions.
    tx_df, raw_df = load_data()

    # 2. Build the directed transaction graph.
    tx_graph = build_graph(tx_df)

    # 3. Random walks (optional; kept for node-embedding experiments).
    generate_random_walks(tx_graph)

    # 4. Hand-crafted node features plus per-node timestamps.
    feature_dicts, node_timestamps = extract_node_features(tx_graph, tx_df)

    # 5. Tensors for the torch models.
    node_features, edge_features, padded_timestamps, num_nodes = prepare_model_inputs(
        feature_dicts, node_timestamps)

    # 6. Binary fraud label per node.
    labels_df = create_target_variables(tx_graph, raw_df)

    # 7. Train and evaluate every model.
    results = train_models(node_features, edge_features, padded_timestamps, num_nodes, labels_df)

    print("区块链交易欺诈检测模型训练完成!")
    return results

if __name__ == "__main__":
    # Run the training pipeline only when executed as a script, not on import.
    main()