{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "97cdbbc1",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/data/home/sczc619/run/LML/anaconda3/envs/tsne/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
      "  from .autonotebook import tqdm as notebook_tqdm\n"
     ]
    }
   ],
   "source": [
    "import pickle\n",
    "import os\n",
    "import random as rd\n",
    "import numpy as np\n",
    "import copy\n",
    "import copy as cp\n",
    "import dgl\n",
    "from collections import defaultdict\n",
    "import time\n",
    "import scipy.sparse as sp\n",
    "from scipy.io import loadmat\n",
    "import torch\n",
    "import torch.nn.functional as F\n",
    "import torch.nn as nn\n",
    "from torch.nn import TransformerEncoder, TransformerEncoderLayer\n",
    "from torch_geometric.utils import to_dense_adj, subgraph\n",
    "from torch.utils.tensorboard import SummaryWriter\n",
    "from sklearn.preprocessing import MinMaxScaler\n",
    "from sklearn.manifold import TSNE\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.metrics import f1_score, accuracy_score, recall_score, roc_auc_score, average_precision_score, \\\n",
    "    confusion_matrix\n",
    "\n",
    "filelist = {\n",
    "    'amz_upu': 'amz_upu_adjlists.pickle',\n",
    "    'amz_usu': 'amz_usu_adjlists.pickle',\n",
    "    'amz_uvu': 'amz_uvu_adjlists.pickle',\n",
    "    'yelp_rsr': 'yelp_rsr_adjlists.pickle',\n",
    "    'yelp_rtr': 'yelp_rtr_adjlists.pickle',\n",
    "    'yelp_rur': 'yelp_rur_adjlists.pickle'\n",
    "}\n",
    "\n",
    "file_matrix_prefix = {\n",
    "    'amz_upu': 'amazon_upu_matrix_',\n",
    "    'amz_usu': 'amazon_usu_matrix_',\n",
    "    'amz_uvu': 'amazon_uvu_matrix_',\n",
    "    'yelp_rsr': 'yelpnet_rsr_matrix_decompision_',\n",
    "    'yelp_rtr': 'yelpnet_rtr_matrix_decompision_',\n",
    "    'yelp_rur': 'yelpnet_rur_matrix_decompision_'\n",
    "}\n",
    "\n",
    "\n",
    "def create_node_subgraph(node_idx, feat_data, edge_indexs, device):\n",
    "    \"\"\"\n",
    "    为单个节点创建一阶邻居子图（所有邻居）\n",
    "    \"\"\"\n",
    "    neighbors = set()\n",
    "    for rel_idx in range(len(edge_indexs)):\n",
    "        edge_index = edge_indexs[rel_idx][0].cpu().numpy()\n",
    "        # 找出以中心点为起点的边的终点\n",
    "        rel_neighbors = edge_index[1][edge_index[0] == node_idx].tolist()\n",
    "        neighbors.update(rel_neighbors)\n",
    "\n",
    "    # 移除中心节点自身\n",
    "    neighbors.discard(node_idx)\n",
    "    neighbors = list(neighbors)\n",
    "\n",
    "    # 如果邻居太多，进行随机采样截取\n",
    "    sample_size = 399\n",
    "    if len(neighbors) > sample_size:\n",
    "        neighbors = np.random.choice(neighbors, size=sample_size, replace=False).tolist()\n",
    "\n",
    "    # 构建子图节点列表，确保中心节点是第一个\n",
    "    sub_nodes = [node_idx] + [n for n in neighbors if n != node_idx]\n",
    "\n",
    "    # 构建子图边列表\n",
    "    sub_edge_index = []\n",
    "    for rel_idx in range(len(edge_indexs)):\n",
    "        edge_index = edge_indexs[rel_idx][0].cpu().numpy()\n",
    "        mask = np.isin(edge_index[0], sub_nodes) & np.isin(edge_index[1], sub_nodes)\n",
    "        local_edges = edge_index[:, mask]\n",
    "\n",
    "        # 创建节点映射\n",
    "        node_map = {n: i for i, n in enumerate(sub_nodes)}\n",
    "\n",
    "        # 将全局索引映射到局部索引\n",
    "        if len(local_edges) > 0 and local_edges.size > 0:\n",
    "            src_nodes = [node_map[src] for src in local_edges[0]]\n",
    "            dst_nodes = [node_map[dst] for dst in local_edges[1]]\n",
    "            edge_tensor = torch.tensor([src_nodes, dst_nodes], dtype=torch.long)\n",
    "        else:\n",
    "            # 添加自环确保图不为空\n",
    "            edge_tensor = torch.tensor([[0], [0]], dtype=torch.long)\n",
    "\n",
    "        sub_edge_index.append(edge_tensor.to(device))\n",
    "\n",
    "    # 创建子图数据\n",
    "    subgraph = {\n",
    "        'features': feat_data[sub_nodes].clone(),\n",
    "        'edges': sub_edge_index,\n",
    "        'global_idx': sub_nodes\n",
    "    }\n",
    "\n",
    "    return subgraph\n",
    "\n",
    "\n",
    "def dict_to_edge_index(edge_dict):\n",
    "    source_nodes = []\n",
    "    target_nodes = []\n",
    "    for src, targets in edge_dict.items():\n",
    "        for target in targets:\n",
    "            source_nodes.append(src)\n",
    "            target_nodes.append(target)\n",
    "    edge_index = [source_nodes, target_nodes]\n",
    "    return torch.LongTensor(edge_index)\n",
    "\n",
    "\n",
    "def numpy_array_to_edge_index(np_array):\n",
    "    assert np_array.ndim == 2 and np_array.shape[0] == np_array.shape[1], \"Input must be a square matrix.\"\n",
    "    rows, cols = np.nonzero(np_array)\n",
    "    edge_index = np.vstack((rows, cols))\n",
    "    edge_index_tensor = torch.from_numpy(edge_index).long()\n",
    "    return edge_index_tensor\n",
    "\n",
    "\n",
    "def load_data(data, k=2, prefix=''):\n",
    "    pickle_file = {}\n",
    "    matrix_prefix = {}\n",
    "    for key in filelist:\n",
    "        pickle_file[key] = os.path.join(prefix, filelist[key])\n",
    "        matrix_prefix[key] = os.path.join(prefix, file_matrix_prefix[key])\n",
    "\n",
    "    if data == 'yelp':\n",
    "        data_file = loadmat(os.path.join(prefix, 'YelpChi.mat'))\n",
    "        labels = data_file['label'].flatten()\n",
    "        feat_data = data_file['features'].todense().A\n",
    "        with open(pickle_file['yelp_rur'], 'rb') as file:\n",
    "            relation1 = pickle.load(file)\n",
    "        file.close()\n",
    "        relation1 = dict_to_edge_index(relation1)\n",
    "        relation1_tree = []\n",
    "        for i in range(1, k + 1):\n",
    "            file_name = '{}{}.pkl'.format(matrix_prefix['yelp_rur'], i)\n",
    "            with open(file_name, 'rb') as file:\n",
    "                tree = pickle.load(file)\n",
    "            file.close()\n",
    "            relation1_tree.append(numpy_array_to_edge_index(tree))\n",
    "        with open(pickle_file['yelp_rtr'], 'rb') as file:\n",
    "            relation2 = pickle.load(file)\n",
    "        file.close()\n",
    "        relation2 = dict_to_edge_index(relation2)\n",
    "        relation2_tree = []\n",
    "        for i in range(1, k + 1):\n",
    "            file_name = '{}{}.pkl'.format(matrix_prefix['yelp_rtr'], i)\n",
    "            with open(file_name, 'rb') as file:\n",
    "                tree = pickle.load(file)\n",
    "            file.close()\n",
    "            relation2_tree.append(numpy_array_to_edge_index(tree))\n",
    "        with open(pickle_file['yelp_rsr'], 'rb') as file:\n",
    "            relation3 = pickle.load(file)\n",
    "        file.close()\n",
    "        relation3 = dict_to_edge_index(relation3)\n",
    "        relation3_tree = []\n",
    "        for i in range(1, k + 1):\n",
    "            file_name = '{}{}.pkl'.format(matrix_prefix['yelp_rsr'], i)\n",
    "            with open(file_name, 'rb') as file:\n",
    "                tree = pickle.load(file)\n",
    "            file.close()\n",
    "            relation3_tree.append(numpy_array_to_edge_index(tree))\n",
    "        return [[relation1, relation1_tree], [relation2, relation2_tree],\n",
    "                [relation3, relation3_tree]], feat_data, labels\n",
    "    elif data == 'amazon':\n",
    "        data_file = loadmat(os.path.join(prefix, 'Amazon.mat'))\n",
    "        labels = data_file['label'].flatten()\n",
    "        feat_data = data_file['features'].todense().A\n",
    "        with open(pickle_file['amz_upu'], 'rb') as file:\n",
    "            relation1 = pickle.load(file)\n",
    "        file.close()\n",
    "        relation1 = dict_to_edge_index(relation1)\n",
    "        relation1_tree = []\n",
    "        for i in range(1, k + 1):\n",
    "            file_name = '{}{}.pkl'.format(matrix_prefix['amz_upu'], i)\n",
    "            with open(file_name, 'rb') as file:\n",
    "                tree = pickle.load(file)\n",
    "            file.close()\n",
    "            relation1_tree.append(numpy_array_to_edge_index(tree))\n",
    "        with open(pickle_file['amz_usu'], 'rb') as file:\n",
    "            relation2 = pickle.load(file)\n",
    "        file.close()\n",
    "        relation2 = dict_to_edge_index(relation2)\n",
    "        relation2_tree = []\n",
    "        for i in range(1, k + 1):\n",
    "            file_name = '{}{}.pkl'.format(matrix_prefix['amz_usu'], i)\n",
    "            with open(file_name, 'rb') as file:\n",
    "                tree = pickle.load(file)\n",
    "            file.close()\n",
    "            relation2_tree.append(numpy_array_to_edge_index(tree))\n",
    "        with open(pickle_file['amz_uvu'], 'rb') as file:\n",
    "            relation3 = pickle.load(file)\n",
    "        file.close()\n",
    "        relation3_tree = []\n",
    "        for i in range(1, k + 1):\n",
    "            file_name = '{}{}.pkl'.format(matrix_prefix['amz_uvu'], i)\n",
    "            with open(file_name, 'rb') as file:\n",
    "                tree = pickle.load(file)\n",
    "            file.close()\n",
    "            relation3_tree.append(numpy_array_to_edge_index(tree))\n",
    "        relation3 = dict_to_edge_index(relation3)\n",
    "\n",
    "        return [[relation1, relation1_tree], [relation2, relation2_tree],\n",
    "                [relation3, relation3_tree]], feat_data, labels\n",
    "\n",
    "\n",
    "def normalize(mx):\n",
    "    rowsum = np.array(mx.sum(1)) + 0.01\n",
    "    r_inv = np.power(rowsum, -1).flatten()\n",
    "    r_inv[np.isinf(r_inv)] = 0.\n",
    "    r_mat_inv = sp.diags(r_inv)\n",
    "    mx = r_mat_inv.dot(mx)\n",
    "    return mx\n",
    "\n",
    "\n",
    "def pos_neg_split(nodes, labels):\n",
    "    # 正负样本分割\n",
    "    pos_nodes = []\n",
    "    neg_nodes = cp.deepcopy(nodes)\n",
    "    aux_nodes = cp.deepcopy(nodes)\n",
    "    for idx, label in enumerate(labels):\n",
    "        if label == 1:\n",
    "            pos_nodes.append(aux_nodes[idx])\n",
    "            neg_nodes.remove(aux_nodes[idx])\n",
    "\n",
    "    return pos_nodes, neg_nodes\n",
    "\n",
    "\n",
    "def undersample(pos_nodes, neg_nodes, scale=1):\n",
    "    # 对负样本进行下采样，平衡正负样本数量\n",
    "    aux_nodes = cp.deepcopy(neg_nodes)\n",
    "    aux_nodes = rd.sample(aux_nodes, k=int(len(pos_nodes) * scale))\n",
    "    batch_nodes = pos_nodes + aux_nodes\n",
    "\n",
    "    return batch_nodes\n",
    "\n",
    "\n",
    "def calculate_g_mean(y_true, y_pred):\n",
    "    cm = confusion_matrix(y_true, y_pred)\n",
    "    sensitivities = []\n",
    "    for i in range(len(cm)):\n",
    "        TP = cm[i, i]\n",
    "        FN = cm[i, :].sum() - TP\n",
    "        sensitivity = TP / (TP + FN) if (TP + FN) != 0 else 0\n",
    "        sensitivities.append(sensitivity)\n",
    "    g_mean = np.prod(sensitivities) ** (1 / len(sensitivities))\n",
    "    return g_mean\n",
    "\n",
    "\n",
    "def iterate_batches(indices, batch_size, shuffle=True):\n",
    "    \"\"\"\n",
    "        将索引列表划分为指定大小的批次\n",
    "        :param indices: 样本索引列表\n",
    "        :param batch_size: 每个批次的大小\n",
    "        :param shuffle: 是否打乱顺序\n",
    "        :return: 生成批次索引的迭代器\n",
    "    \"\"\"\n",
    "    if shuffle:\n",
    "        rd.shuffle(indices)\n",
    "    for i in range(0, len(indices), batch_size):\n",
    "        yield indices[i:i + batch_size]\n",
    "\n",
    "\n",
    "def test(idx_eval, y_eval, model, feat_data, edge_indexs, device, batch_size=64):\n",
    "    model.eval()\n",
    "    all_probs = []\n",
    "    all_labels = []\n",
    "\n",
    "    # 分批处理\n",
    "    for batch_centers in iterate_batches(idx_eval, batch_size, shuffle=False):\n",
    "        subgraph_data = []\n",
    "\n",
    "        # 为每个中心节点构建子图\n",
    "        for xi in batch_centers:\n",
    "            subgraph = create_node_subgraph(xi, feat_data, edge_indexs, device)\n",
    "            subgraph_data.append(subgraph)\n",
    "\n",
    "        # 获取中心节点预测\n",
    "        with torch.no_grad():\n",
    "            center_logits, _ = model(subgraph_data)  # [B, 2]\n",
    "            probs = torch.softmax(center_logits, dim=-1)[:, 1]  # 正类概率\n",
    "            all_probs.extend(probs.cpu().numpy())\n",
    "            all_labels.extend([labels[xi] for xi in batch_centers])\n",
    "\n",
    "    # 计算指标\n",
    "    auc_score = roc_auc_score(all_labels, all_probs)\n",
    "    ap_score = average_precision_score(all_labels, all_probs)\n",
    "    pred_labels = (np.array(all_probs) >= 0.5).astype(int)\n",
    "    f1 = f1_score(all_labels, pred_labels, average='macro')\n",
    "    g_mean = calculate_g_mean(all_labels, pred_labels)\n",
    "\n",
    "    return auc_score, ap_score, f1, g_mean\n",
    "\n",
    "\n",
    "class BSNE_Transformer(nn.Module):\n",
    "    def __init__(self, in_feat, out_feat, relation_nums=3, d_model=256,\n",
    "                 nhead=8, num_layers=3, dim_feedforward=256,\n",
    "                 drop_rate=0.5):\n",
    "        super().__init__()\n",
    "        self.relation_nums = relation_nums\n",
    "        self.d_model = d_model\n",
    "        self.nhead = nhead\n",
    "\n",
    "        self.feature_proj = nn.Sequential(\n",
    "            nn.Linear(in_feat, d_model),\n",
    "            nn.LayerNorm(d_model),\n",
    "            nn.ReLU()\n",
    "        )\n",
    "        self.norm = nn.LayerNorm(d_model)\n",
    "\n",
    "        self.relation_encoders = nn.ModuleList([\n",
    "            TransformerEncoder(\n",
    "                TransformerEncoderLayer(\n",
    "                    d_model=d_model,\n",
    "                    nhead=nhead,\n",
    "                    dim_feedforward=dim_feedforward,\n",
    "                    dropout=drop_rate,\n",
    "                    batch_first=True\n",
    "                ),\n",
    "                num_layers=num_layers\n",
    "            ) for _ in range(relation_nums)\n",
    "        ])\n",
    "\n",
    "        self.classifier = nn.Sequential(\n",
    "            nn.Linear(relation_nums * d_model, 512),\n",
    "            nn.ReLU(),\n",
    "            nn.Dropout(drop_rate),\n",
    "            nn.Linear(512, out_feat)\n",
    "        )\n",
    "        for p in self.parameters():\n",
    "            if p.dim() > 1:\n",
    "                nn.init.xavier_uniform_(p)\n",
    "\n",
    "    #         self.node_feature_extractor = nn.Sequential(\n",
    "    #             nn.Linear(relation_nums * d_model, d_model),\n",
    "    #             nn.ReLU(),\n",
    "    #             nn.LayerNorm(d_model)\n",
    "    #         )\n",
    "\n",
    "    def forward(self, subgraph_batch):\n",
    "        # 为每个子图单独处理\n",
    "        center_logits_list = []\n",
    "        center_features_list = []\n",
    "\n",
    "        for sg in subgraph_batch:\n",
    "            # 处理单个子图\n",
    "            features = self.feature_proj(sg['features'].unsqueeze(0))  # [1, num_nodes, d_model]\n",
    "            features = self.norm(features)\n",
    "\n",
    "            # 为每个关系类型单独处理\n",
    "            rel_outputs = []\n",
    "            num_nodes = features.size(1)\n",
    "\n",
    "            for rel_idx in range(self.relation_nums):\n",
    "                # 构建当前关系的邻接矩阵\n",
    "                edge_index = sg['edges'][rel_idx]\n",
    "                adj = torch.zeros(num_nodes, num_nodes,\n",
    "                                  dtype=torch.float, device=features.device)\n",
    "\n",
    "                if edge_index.size(1) > 0:\n",
    "                    src, dst = edge_index\n",
    "                    adj[src, dst] = 1.0\n",
    "\n",
    "                # 添加自环\n",
    "                adj[range(num_nodes), range(num_nodes)] = 1.0\n",
    "\n",
    "                # 创建注意力掩码\n",
    "                adj_mask = adj.masked_fill(adj == 0.0, float('-inf'))\n",
    "                adj_mask = adj_mask.masked_fill(adj == 1.0, 0.0)\n",
    "\n",
    "                # 扩展为多头注意力掩码\n",
    "                adj_mask = adj_mask.unsqueeze(0).unsqueeze(0)  # [1, 1, num_nodes, num_nodes]\n",
    "                adj_mask = adj_mask.expand(1, self.nhead, num_nodes, num_nodes)\n",
    "                adj_mask = adj_mask.reshape(-1, num_nodes, num_nodes)\n",
    "\n",
    "                # 关系编码\n",
    "                encoder_output = self.relation_encoders[rel_idx](\n",
    "                    src=features,\n",
    "                    mask=adj_mask\n",
    "                )\n",
    "                rel_outputs.append(encoder_output)\n",
    "\n",
    "            # 合并多关系特征\n",
    "            combined = torch.cat(rel_outputs, dim=-1)  # [1, num_nodes, rel*d_model]\n",
    "\n",
    "            # 提取中心节点特征（第一个节点）\n",
    "            center_features = combined[:, 0, :]  # [1, rel*d_model]\n",
    "            center_logits = self.classifier(center_features)  # [1, out_feat]\n",
    "            center_logits = F.log_softmax(center_logits, dim=-1)\n",
    "\n",
    "            center_logits_list.append(center_logits)\n",
    "            center_features_list.append(center_features)\n",
    "\n",
    "        # 将结果堆叠为批次\n",
    "        center_logits = torch.cat(center_logits_list, dim=0)\n",
    "        center_features = torch.cat(center_features_list, dim=0)\n",
    "\n",
    "        return center_logits, center_features\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "81c3027f",
   "metadata": {},
   "outputs": [],
   "source": [
    "   "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "50a6d575",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "cuda\n",
      "loading data...\n",
      "\n",
      "=== Starting Pretraining ===\n",
      "\n",
      "=== Pretraining Epoch 0 (Center Node: 268) ===\n",
      "Step 0: BSNE_Loss=22.8146,local_loss: 22.8128604888916,global_loss: 0.001752381445840001\n",
      "Step 1: BSNE_Loss=22.9056,local_loss: 22.846466064453125,global_loss: 0.05913722515106201\n",
      "Step 2: BSNE_Loss=21.4817,local_loss: 21.46270179748535,global_loss: 0.018983319401741028\n",
      "Step 3: BSNE_Loss=20.9640,local_loss: 20.90300750732422,global_loss: 0.061020512133836746\n",
      "Step 4: BSNE_Loss=19.9728,local_loss: 19.970300674438477,global_loss: 0.0025482624769210815\n",
      "Step 5: BSNE_Loss=19.5826,local_loss: 19.541614532470703,global_loss: 0.041002485901117325\n",
      "Step 6: BSNE_Loss=18.3338,local_loss: 18.329866409301758,global_loss: 0.00391407310962677\n",
      "Step 7: BSNE_Loss=21.2345,local_loss: 21.139225006103516,global_loss: 0.09529009461402893\n",
      "Step 8: BSNE_Loss=22.1395,local_loss: 22.100263595581055,global_loss: 0.0392242968082428\n",
      "Step 9: BSNE_Loss=21.6070,local_loss: 21.605592727661133,global_loss: 0.0014297807356342673\n",
      "Step 10: BSNE_Loss=24.1373,local_loss: 24.13043975830078,global_loss: 0.0068781618028879166\n",
      "Step 11: BSNE_Loss=21.4122,local_loss: 21.39674186706543,global_loss: 0.015481227077543736\n",
      "Step 12: BSNE_Loss=18.8225,local_loss: 18.81767463684082,global_loss: 0.0047845556400716305\n",
      "Step 13: BSNE_Loss=23.7497,local_loss: 23.71475601196289,global_loss: 0.03497444838285446\n",
      "Step 14: BSNE_Loss=21.1749,local_loss: 21.136754989624023,global_loss: 0.038128841668367386\n",
      "Step 15: BSNE_Loss=23.3108,local_loss: 23.251604080200195,global_loss: 0.05920365825295448\n",
      "Step 16: BSNE_Loss=19.7081,local_loss: 19.698713302612305,global_loss: 0.009392420761287212\n",
      "Step 17: BSNE_Loss=20.9173,local_loss: 20.893798828125,global_loss: 0.023512570187449455\n",
      "Step 18: BSNE_Loss=22.1399,local_loss: 22.138193130493164,global_loss: 0.0016950557474046946\n",
      "Step 19: BSNE_Loss=20.5553,local_loss: 20.520185470581055,global_loss: 0.035074226558208466\n",
      "Step 20: BSNE_Loss=21.2867,local_loss: 21.278541564941406,global_loss: 0.008165677078068256\n",
      "Step 21: BSNE_Loss=21.0832,local_loss: 21.03274917602539,global_loss: 0.05045858770608902\n",
      "Step 22: BSNE_Loss=20.6231,local_loss: 20.592334747314453,global_loss: 0.03080512210726738\n",
      "Step 23: BSNE_Loss=21.2793,local_loss: 21.279190063476562,global_loss: 0.00011883320985361934\n",
      "Step 24: BSNE_Loss=17.8644,local_loss: 17.8154296875,global_loss: 0.04893927648663521\n",
      "Step 25: BSNE_Loss=19.1823,local_loss: 19.179420471191406,global_loss: 0.0028624366968870163\n",
      "Step 26: BSNE_Loss=22.7778,local_loss: 22.769031524658203,global_loss: 0.00878372136503458\n",
      "Step 27: BSNE_Loss=24.2475,local_loss: 24.225236892700195,global_loss: 0.022305360063910484\n",
      "Step 28: BSNE_Loss=21.8346,local_loss: 21.81564712524414,global_loss: 0.018929803743958473\n",
      "Step 29: BSNE_Loss=19.4517,local_loss: 19.35310173034668,global_loss: 0.09858111292123795\n",
      "Step 30: BSNE_Loss=22.9371,local_loss: 22.924068450927734,global_loss: 0.013019625097513199\n",
      "Step 31: BSNE_Loss=19.7491,local_loss: 19.74161720275879,global_loss: 0.007502504624426365\n",
      "Step 32: BSNE_Loss=22.2196,local_loss: 22.219589233398438,global_loss: 2.040773324551992e-05\n",
      "Step 33: BSNE_Loss=19.8817,local_loss: 19.881013870239258,global_loss: 0.0006682365783490241\n",
      "Step 34: BSNE_Loss=21.3918,local_loss: 21.34606170654297,global_loss: 0.045714180916547775\n",
      "Step 35: BSNE_Loss=19.5744,local_loss: 19.573442459106445,global_loss: 0.0010003646602854133\n",
      "Step 36: BSNE_Loss=21.3808,local_loss: 21.38052749633789,global_loss: 0.00031009656959213316\n",
      "Step 37: BSNE_Loss=21.2496,local_loss: 21.248958587646484,global_loss: 0.000676030816975981\n",
      "Step 38: BSNE_Loss=19.4015,local_loss: 19.38539695739746,global_loss: 0.01614108867943287\n",
      "Step 39: BSNE_Loss=17.6994,local_loss: 17.699186325073242,global_loss: 0.0001947136624949053\n",
      "Step 40: BSNE_Loss=20.5004,local_loss: 20.497661590576172,global_loss: 0.002693201182410121\n",
      "Step 41: BSNE_Loss=19.2068,local_loss: 19.10024070739746,global_loss: 0.10655716061592102\n",
      "Step 42: BSNE_Loss=21.8175,local_loss: 21.81206512451172,global_loss: 0.005418811459094286\n",
      "Step 43: BSNE_Loss=20.6892,local_loss: 20.657642364501953,global_loss: 0.0315704308450222\n",
      "Step 44: BSNE_Loss=21.5550,local_loss: 21.51500701904297,global_loss: 0.04003768786787987\n",
      "Step 45: BSNE_Loss=18.7288,local_loss: 18.711956024169922,global_loss: 0.016803542152047157\n",
      "Step 46: BSNE_Loss=20.4668,local_loss: 20.416828155517578,global_loss: 0.04996617138385773\n",
      "Step 47: BSNE_Loss=22.4297,local_loss: 22.33766746520996,global_loss: 0.09201717376708984\n",
      "Step 48: BSNE_Loss=18.7945,local_loss: 18.7759952545166,global_loss: 0.01855144090950489\n",
      "Step 49: BSNE_Loss=19.9449,local_loss: 19.94156265258789,global_loss: 0.0032909547444432974\n",
      "Step 50: BSNE_Loss=22.5561,local_loss: 22.545686721801758,global_loss: 0.010405431501567364\n",
      "Step 51: BSNE_Loss=21.7045,local_loss: 21.701984405517578,global_loss: 0.002520121866837144\n",
      "Step 52: BSNE_Loss=22.9249,local_loss: 22.800500869750977,global_loss: 0.12443695962429047\n",
      "Step 53: BSNE_Loss=20.1108,local_loss: 20.00580406188965,global_loss: 0.10498463362455368\n",
      "Step 54: BSNE_Loss=17.6073,local_loss: 17.587677001953125,global_loss: 0.019603192806243896\n",
      "Step 55: BSNE_Loss=18.8309,local_loss: 18.818666458129883,global_loss: 0.01224883645772934\n",
      "Step 56: BSNE_Loss=16.2449,local_loss: 16.19025421142578,global_loss: 0.05460713431239128\n",
      "Step 57: BSNE_Loss=20.9967,local_loss: 20.865806579589844,global_loss: 0.1308785080909729\n",
      "Step 58: BSNE_Loss=18.2243,local_loss: 18.218338012695312,global_loss: 0.00598018616437912\n",
      "Step 59: BSNE_Loss=21.6158,local_loss: 21.597597122192383,global_loss: 0.01816316694021225\n",
      "Step 60: BSNE_Loss=21.4841,local_loss: 21.473068237304688,global_loss: 0.011001059785485268\n",
      "Step 61: BSNE_Loss=22.2178,local_loss: 22.180622100830078,global_loss: 0.03719282150268555\n",
      "Step 62: BSNE_Loss=20.2332,local_loss: 20.07156753540039,global_loss: 0.16161593794822693\n",
      "Step 63: BSNE_Loss=19.8601,local_loss: 19.743513107299805,global_loss: 0.11659358441829681\n",
      "Step 64: BSNE_Loss=21.1585,local_loss: 21.154624938964844,global_loss: 0.003858958138152957\n",
      "Step 65: BSNE_Loss=16.9457,local_loss: 16.945695877075195,global_loss: 4.4777003495255485e-05\n",
      "Step 66: BSNE_Loss=21.8022,local_loss: 21.767183303833008,global_loss: 0.035063426941633224\n",
      "Step 67: BSNE_Loss=21.1448,local_loss: 21.10150146484375,global_loss: 0.043292973190546036\n",
      "Step 68: BSNE_Loss=21.1997,local_loss: 21.199687957763672,global_loss: 4.880152482655831e-05\n",
      "Step 69: BSNE_Loss=20.9050,local_loss: 20.9025936126709,global_loss: 0.002426174469292164\n",
      "Step 70: BSNE_Loss=21.3433,local_loss: 21.30484390258789,global_loss: 0.03848680853843689\n",
      "Step 71: BSNE_Loss=20.4326,local_loss: 20.41792106628418,global_loss: 0.014715375378727913\n",
      "Step 72: BSNE_Loss=21.1590,local_loss: 21.129425048828125,global_loss: 0.02954966202378273\n",
      "Step 73: BSNE_Loss=20.3173,local_loss: 20.27457046508789,global_loss: 0.042769260704517365\n",
      "Step 74: BSNE_Loss=18.5108,local_loss: 18.4437313079834,global_loss: 0.06704066693782806\n",
      "Step 75: BSNE_Loss=18.0760,local_loss: 18.06752586364746,global_loss: 0.008499645628035069\n",
      "Step 76: BSNE_Loss=16.2808,local_loss: 16.2769718170166,global_loss: 0.0038098283112049103\n",
      "Step 77: BSNE_Loss=17.3915,local_loss: 17.387195587158203,global_loss: 0.004319435451179743\n",
      "Step 78: BSNE_Loss=17.5904,local_loss: 17.590177536010742,global_loss: 0.00024975507403723896\n",
      "Step 79: BSNE_Loss=15.7685,local_loss: 15.688581466674805,global_loss: 0.07988877594470978\n",
      "Step 80: BSNE_Loss=18.3534,local_loss: 18.238910675048828,global_loss: 0.11453448981046677\n",
      "Step 81: BSNE_Loss=18.7050,local_loss: 18.645410537719727,global_loss: 0.05962013453245163\n",
      "Step 82: BSNE_Loss=19.2379,local_loss: 19.193540573120117,global_loss: 0.04431739076972008\n",
      "Step 83: BSNE_Loss=17.4529,local_loss: 17.37140464782715,global_loss: 0.08146438747644424\n",
      "Step 84: BSNE_Loss=15.3087,local_loss: 14.736252784729004,global_loss: 0.5724793076515198\n",
      "Step 85: BSNE_Loss=18.3939,local_loss: 18.36701202392578,global_loss: 0.02687857113778591\n",
      "Step 86: BSNE_Loss=20.1211,local_loss: 18.954605102539062,global_loss: 1.1665064096450806\n",
      "Step 87: BSNE_Loss=14.8879,local_loss: 14.882019996643066,global_loss: 0.0058569530956447124\n",
      "Step 88: BSNE_Loss=13.2326,local_loss: 12.766980171203613,global_loss: 0.46557220816612244\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 89: BSNE_Loss=12.3324,local_loss: 12.275420188903809,global_loss: 0.05696495249867439\n",
      "Step 90: BSNE_Loss=16.2893,local_loss: 16.092987060546875,global_loss: 0.19633154571056366\n",
      "Step 91: BSNE_Loss=13.3907,local_loss: 13.339693069458008,global_loss: 0.051029764115810394\n",
      "Step 92: BSNE_Loss=16.2247,local_loss: 16.1933650970459,global_loss: 0.03135388344526291\n",
      "Step 93: BSNE_Loss=12.5397,local_loss: 12.539143562316895,global_loss: 0.0005212515243329108\n",
      "Step 94: BSNE_Loss=12.4152,local_loss: 12.353589057922363,global_loss: 0.0616019144654274\n",
      "Step 95: BSNE_Loss=12.5349,local_loss: 12.492011070251465,global_loss: 0.042859598994255066\n",
      "Step 96: BSNE_Loss=12.4645,local_loss: 12.09367847442627,global_loss: 0.3708418905735016\n",
      "Step 97: BSNE_Loss=13.4414,local_loss: 13.401138305664062,global_loss: 0.04025885835289955\n",
      "Step 98: BSNE_Loss=13.8094,local_loss: 13.408089637756348,global_loss: 0.40131521224975586\n",
      "Step 99: BSNE_Loss=13.3355,local_loss: 13.30524730682373,global_loss: 0.030256617814302444\n",
      "Step 100: BSNE_Loss=11.3459,local_loss: 11.345870971679688,global_loss: 9.63936145126354e-07\n",
      "Step 101: BSNE_Loss=11.3022,local_loss: 11.182926177978516,global_loss: 0.1193208247423172\n",
      "Step 102: BSNE_Loss=14.0842,local_loss: 12.931636810302734,global_loss: 1.152565836906433\n",
      "Step 103: BSNE_Loss=10.6683,local_loss: 10.666997909545898,global_loss: 0.001349895028397441\n",
      "Step 104: BSNE_Loss=11.1025,local_loss: 10.681873321533203,global_loss: 0.4206392467021942\n",
      "Step 105: BSNE_Loss=9.5441,local_loss: 9.483732223510742,global_loss: 0.06035489961504936\n",
      "Step 106: BSNE_Loss=12.7640,local_loss: 10.118513107299805,global_loss: 2.6454434394836426\n",
      "Step 107: BSNE_Loss=9.4619,local_loss: 9.458989143371582,global_loss: 0.0028645226266235113\n",
      "Step 108: BSNE_Loss=12.3615,local_loss: 12.359029769897461,global_loss: 0.0024865062441676855\n",
      "Step 109: BSNE_Loss=10.4284,local_loss: 10.392248153686523,global_loss: 0.03617147356271744\n",
      "Step 110: BSNE_Loss=9.7651,local_loss: 9.73938274383545,global_loss: 0.02572598122060299\n",
      "Step 111: BSNE_Loss=8.9320,local_loss: 8.821796417236328,global_loss: 0.1102413684129715\n",
      "Step 112: BSNE_Loss=12.2568,local_loss: 12.22313117980957,global_loss: 0.033684004098176956\n",
      "Step 113: BSNE_Loss=7.8341,local_loss: 7.659118175506592,global_loss: 0.1749434471130371\n",
      "Step 114: BSNE_Loss=11.6062,local_loss: 11.06407642364502,global_loss: 0.5421327352523804\n",
      "Step 115: BSNE_Loss=10.9959,local_loss: 9.963276863098145,global_loss: 1.032604455947876\n",
      "Step 116: BSNE_Loss=8.8309,local_loss: 8.823857307434082,global_loss: 0.007060136180371046\n",
      "Step 117: BSNE_Loss=6.9061,local_loss: 6.848836421966553,global_loss: 0.05725238844752312\n",
      "Step 118: BSNE_Loss=9.3359,local_loss: 9.228260040283203,global_loss: 0.10767637938261032\n",
      "Step 119: BSNE_Loss=9.5573,local_loss: 9.425909042358398,global_loss: 0.13141192495822906\n",
      "Step 120: BSNE_Loss=11.1563,local_loss: 11.137307167053223,global_loss: 0.018995536491274834\n",
      "Step 121: BSNE_Loss=9.9770,local_loss: 9.216297149658203,global_loss: 0.7606557011604309\n",
      "Step 122: BSNE_Loss=11.4497,local_loss: 11.035964965820312,global_loss: 0.4136941730976105\n",
      "Step 123: BSNE_Loss=10.6107,local_loss: 7.904353618621826,global_loss: 2.7063724994659424\n",
      "Step 124: BSNE_Loss=8.1308,local_loss: 7.7347588539123535,global_loss: 0.396088570356369\n",
      "Step 125: BSNE_Loss=10.5250,local_loss: 10.52318000793457,global_loss: 0.0018496799748390913\n",
      "Step 126: BSNE_Loss=8.9266,local_loss: 8.729148864746094,global_loss: 0.19749492406845093\n",
      "Step 127: BSNE_Loss=11.7210,local_loss: 11.054794311523438,global_loss: 0.6662247180938721\n",
      "Step 128: BSNE_Loss=6.6920,local_loss: 6.673098087310791,global_loss: 0.018890993669629097\n",
      "Step 129: BSNE_Loss=7.3591,local_loss: 7.156947135925293,global_loss: 0.20215822756290436\n",
      "Step 130: BSNE_Loss=6.9502,local_loss: 6.88741397857666,global_loss: 0.06274478137493134\n",
      "Step 131: BSNE_Loss=8.7115,local_loss: 8.709463119506836,global_loss: 0.002084919949993491\n",
      "Step 132: BSNE_Loss=11.3463,local_loss: 10.786727905273438,global_loss: 0.5595613121986389\n",
      "Step 133: BSNE_Loss=12.9440,local_loss: 10.199491500854492,global_loss: 2.7445478439331055\n",
      "Step 137: BSNE_Loss=8.6841,local_loss: 8.656347274780273,global_loss: 0.027779577299952507\n",
      "Step 138: BSNE_Loss=9.1180,local_loss: 8.270733833312988,global_loss: 0.8472228646278381\n",
      "Step 139: BSNE_Loss=7.7309,local_loss: 7.714808464050293,global_loss: 0.01606135256588459\n",
      "Step 140: BSNE_Loss=8.9465,local_loss: 8.873908042907715,global_loss: 0.07261977344751358\n",
      "Step 141: BSNE_Loss=7.9264,local_loss: 7.8828911781311035,global_loss: 0.043529532849788666\n",
      "Step 142: BSNE_Loss=8.6797,local_loss: 8.67972469329834,global_loss: 2.071399194392143e-06\n",
      "Step 147: BSNE_Loss=6.0046,local_loss: 5.103100776672363,global_loss: 0.9015094637870789\n",
      "Step 148: BSNE_Loss=13.7253,local_loss: 9.158857345581055,global_loss: 4.566461563110352\n",
      "Step 149: BSNE_Loss=11.4383,local_loss: 8.58751106262207,global_loss: 2.850823402404785\n",
      "Step 150: BSNE_Loss=9.4633,local_loss: 9.409476280212402,global_loss: 0.053863607347011566\n",
      "Step 151: BSNE_Loss=8.8863,local_loss: 8.83767318725586,global_loss: 0.04866049811244011\n",
      "Step 152: BSNE_Loss=10.6493,local_loss: 9.673840522766113,global_loss: 0.9754098653793335\n",
      "Step 153: BSNE_Loss=8.9682,local_loss: 8.25428581237793,global_loss: 0.7139474153518677\n",
      "Step 154: BSNE_Loss=7.5735,local_loss: 7.09347677230835,global_loss: 0.4800112843513489\n",
      "Step 155: BSNE_Loss=11.1096,local_loss: 10.903177261352539,global_loss: 0.20647066831588745\n",
      "Step 156: BSNE_Loss=8.7858,local_loss: 7.314383506774902,global_loss: 1.471417784690857\n",
      "Step 157: BSNE_Loss=11.3155,local_loss: 9.395695686340332,global_loss: 1.9198416471481323\n",
      "Step 158: BSNE_Loss=10.6057,local_loss: 10.24413776397705,global_loss: 0.3615902066230774\n",
      "Step 159: BSNE_Loss=10.6115,local_loss: 10.601632118225098,global_loss: 0.009892298839986324\n",
      "Step 160: BSNE_Loss=7.6353,local_loss: 7.590484142303467,global_loss: 0.04485079273581505\n",
      "Step 161: BSNE_Loss=8.7259,local_loss: 8.703670501708984,global_loss: 0.022202111780643463\n",
      "Step 162: BSNE_Loss=11.3729,local_loss: 10.796463012695312,global_loss: 0.5764132738113403\n",
      "Step 163: BSNE_Loss=9.3263,local_loss: 9.100380897521973,global_loss: 0.2259296178817749\n",
      "Step 164: BSNE_Loss=8.8303,local_loss: 8.829475402832031,global_loss: 0.0008022587280720472\n",
      "Step 165: BSNE_Loss=9.6420,local_loss: 9.509963035583496,global_loss: 0.13202571868896484\n",
      "Step 166: BSNE_Loss=10.7914,local_loss: 9.752689361572266,global_loss: 1.0387520790100098\n",
      "Step 167: BSNE_Loss=8.7585,local_loss: 8.157143592834473,global_loss: 0.6013975739479065\n",
      "Step 168: BSNE_Loss=6.7810,local_loss: 6.177426338195801,global_loss: 0.6035411953926086\n",
      "Step 169: BSNE_Loss=11.2786,local_loss: 11.271343231201172,global_loss: 0.007281801663339138\n",
      "Step 170: BSNE_Loss=8.1316,local_loss: 8.13054084777832,global_loss: 0.0010606319410726428\n",
      "Step 171: BSNE_Loss=11.0603,local_loss: 10.859530448913574,global_loss: 0.20075416564941406\n",
      "Step 172: BSNE_Loss=8.2699,local_loss: 8.269883155822754,global_loss: 2.972141146528884e-07\n",
      "Step 173: BSNE_Loss=6.9260,local_loss: 6.6260552406311035,global_loss: 0.29992011189460754\n",
      "Step 174: BSNE_Loss=8.7438,local_loss: 8.742460250854492,global_loss: 0.001341118710115552\n",
      "Step 175: BSNE_Loss=6.9675,local_loss: 6.651340007781982,global_loss: 0.3161475956439972\n",
      "Step 176: BSNE_Loss=7.4787,local_loss: 7.446890354156494,global_loss: 0.031804051250219345\n",
      "Step 177: BSNE_Loss=8.3247,local_loss: 8.291850090026855,global_loss: 0.032855432480573654\n",
      "Step 178: BSNE_Loss=9.8608,local_loss: 8.587478637695312,global_loss: 1.2732961177825928\n",
      "Step 179: BSNE_Loss=8.8738,local_loss: 8.851908683776855,global_loss: 0.021869812160730362\n",
      "Step 180: BSNE_Loss=7.4636,local_loss: 7.458003044128418,global_loss: 0.005571102723479271\n",
      "Step 181: BSNE_Loss=9.6436,local_loss: 9.528928756713867,global_loss: 0.11469057947397232\n",
      "Step 182: BSNE_Loss=7.5785,local_loss: 7.5223870277404785,global_loss: 0.05611512437462807\n",
      "Step 183: BSNE_Loss=8.7170,local_loss: 8.101020812988281,global_loss: 0.6160044074058533\n",
      "Step 184: BSNE_Loss=9.0964,local_loss: 7.880776405334473,global_loss: 1.2156667709350586\n",
      "Step 185: BSNE_Loss=7.2725,local_loss: 7.239006042480469,global_loss: 0.03345252946019173\n",
      "Step 186: BSNE_Loss=9.0594,local_loss: 8.216055870056152,global_loss: 0.8433153629302979\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 187: BSNE_Loss=6.8047,local_loss: 6.736606121063232,global_loss: 0.06809603422880173\n",
      "Step 188: BSNE_Loss=8.8454,local_loss: 7.815718173980713,global_loss: 1.0296846628189087\n",
      "Step 189: BSNE_Loss=11.2292,local_loss: 11.216756820678711,global_loss: 0.012423147447407246\n",
      "Step 190: BSNE_Loss=10.4922,local_loss: 10.346478462219238,global_loss: 0.14569924771785736\n",
      "Step 191: BSNE_Loss=9.4752,local_loss: 9.467992782592773,global_loss: 0.007170416880398989\n",
      "Step 192: BSNE_Loss=8.5275,local_loss: 8.507495880126953,global_loss: 0.019976655021309853\n",
      "Step 193: BSNE_Loss=10.7490,local_loss: 9.608097076416016,global_loss: 1.1408826112747192\n",
      "Step 194: BSNE_Loss=8.9346,local_loss: 8.934274673461914,global_loss: 0.00034099697950296104\n",
      "Step 195: BSNE_Loss=7.5397,local_loss: 7.126914024353027,global_loss: 0.4127366840839386\n",
      "Step 196: BSNE_Loss=7.9511,local_loss: 7.834170341491699,global_loss: 0.11695470660924911\n",
      "Step 197: BSNE_Loss=9.8647,local_loss: 8.8220853805542,global_loss: 1.0426268577575684\n",
      "Step 198: BSNE_Loss=7.3108,local_loss: 7.137854099273682,global_loss: 0.17296938598155975\n",
      "Step 199: BSNE_Loss=5.7826,local_loss: 5.7289910316467285,global_loss: 0.05361253395676613\n",
      "Center node 268 finished at step 200 with loss 5.7826\n",
      "\n",
      "=== Pretraining Epoch 1 (Center Node: 7572) ===\n",
      "Step 0: BSNE_Loss=15.1891,local_loss: 15.184167861938477,global_loss: 0.004934157710522413\n",
      "Step 1: BSNE_Loss=15.0062,local_loss: 14.6849946975708,global_loss: 0.3212462365627289\n",
      "Step 2: BSNE_Loss=15.0732,local_loss: 15.07266616821289,global_loss: 0.0005443650297820568\n",
      "Step 3: BSNE_Loss=17.8082,local_loss: 17.78037452697754,global_loss: 0.027872955426573753\n",
      "Step 4: BSNE_Loss=16.1391,local_loss: 16.110471725463867,global_loss: 0.028590688481926918\n",
      "Step 5: BSNE_Loss=17.7056,local_loss: 17.68512725830078,global_loss: 0.020493360236287117\n",
      "Step 6: BSNE_Loss=14.6851,local_loss: 14.514732360839844,global_loss: 0.17033042013645172\n",
      "Step 7: BSNE_Loss=17.6324,local_loss: 17.628860473632812,global_loss: 0.0035637107212096453\n",
      "Step 8: BSNE_Loss=14.5539,local_loss: 14.551643371582031,global_loss: 0.002279995009303093\n",
      "Step 9: BSNE_Loss=15.8814,local_loss: 15.615910530090332,global_loss: 0.2654705047607422\n",
      "Step 10: BSNE_Loss=13.3754,local_loss: 13.368454933166504,global_loss: 0.006917025428265333\n",
      "Step 11: BSNE_Loss=17.2323,local_loss: 17.197349548339844,global_loss: 0.03496624901890755\n",
      "Step 12: BSNE_Loss=14.7473,local_loss: 14.674660682678223,global_loss: 0.07258949428796768\n",
      "Step 13: BSNE_Loss=16.2335,local_loss: 16.173391342163086,global_loss: 0.060115013271570206\n",
      "Step 14: BSNE_Loss=16.3942,local_loss: 16.359153747558594,global_loss: 0.0350729338824749\n",
      "Step 15: BSNE_Loss=17.1686,local_loss: 17.09261131286621,global_loss: 0.07598435133695602\n",
      "Step 16: BSNE_Loss=15.0504,local_loss: 15.018712043762207,global_loss: 0.03165783733129501\n",
      "Step 17: BSNE_Loss=12.8613,local_loss: 12.850107192993164,global_loss: 0.011196527630090714\n",
      "Step 18: BSNE_Loss=16.4741,local_loss: 16.1923828125,global_loss: 0.2817647457122803\n",
      "Step 19: BSNE_Loss=13.7092,local_loss: 13.688693046569824,global_loss: 0.020470665767788887\n",
      "Step 20: BSNE_Loss=14.6151,local_loss: 14.615020751953125,global_loss: 7.61404080549255e-05\n",
      "Step 21: BSNE_Loss=10.1895,local_loss: 9.975393295288086,global_loss: 0.21414417028427124\n",
      "Step 22: BSNE_Loss=12.8163,local_loss: 12.662683486938477,global_loss: 0.15361282229423523\n",
      "Step 23: BSNE_Loss=13.5535,local_loss: 13.333980560302734,global_loss: 0.2195449322462082\n",
      "Step 24: BSNE_Loss=13.7621,local_loss: 13.688268661499023,global_loss: 0.07378916442394257\n",
      "Step 25: BSNE_Loss=15.1926,local_loss: 15.178792953491211,global_loss: 0.013775195926427841\n",
      "Step 26: BSNE_Loss=11.7473,local_loss: 11.747288703918457,global_loss: 2.409255102975294e-05\n",
      "Step 27: BSNE_Loss=13.1876,local_loss: 13.0563383102417,global_loss: 0.13126227259635925\n",
      "Step 28: BSNE_Loss=14.7066,local_loss: 14.696077346801758,global_loss: 0.010487515479326248\n",
      "Step 29: BSNE_Loss=10.2521,local_loss: 9.88166618347168,global_loss: 0.3704490661621094\n",
      "Step 30: BSNE_Loss=10.1683,local_loss: 10.167924880981445,global_loss: 0.0003439769789110869\n",
      "Step 31: BSNE_Loss=10.2653,local_loss: 9.959676742553711,global_loss: 0.30565744638442993\n",
      "Step 32: BSNE_Loss=16.3989,local_loss: 15.55369758605957,global_loss: 0.8452273607254028\n",
      "Step 33: BSNE_Loss=8.3439,local_loss: 8.16847038269043,global_loss: 0.17547769844532013\n",
      "Step 34: BSNE_Loss=13.6499,local_loss: 12.426644325256348,global_loss: 1.2232508659362793\n",
      "Step 35: BSNE_Loss=8.6697,local_loss: 8.327478408813477,global_loss: 0.3422650098800659\n",
      "Step 36: BSNE_Loss=12.2041,local_loss: 12.12032413482666,global_loss: 0.08374244719743729\n",
      "Step 37: BSNE_Loss=10.9642,local_loss: 9.835041999816895,global_loss: 1.1291537284851074\n",
      "Step 38: BSNE_Loss=11.1251,local_loss: 10.452438354492188,global_loss: 0.6726153492927551\n",
      "Step 39: BSNE_Loss=10.2121,local_loss: 10.212048530578613,global_loss: 6.600331835215911e-05\n",
      "Step 40: BSNE_Loss=11.2462,local_loss: 10.42906379699707,global_loss: 0.8171044588088989\n",
      "Step 41: BSNE_Loss=11.6219,local_loss: 10.91744613647461,global_loss: 0.7044618725776672\n",
      "Step 42: BSNE_Loss=8.1884,local_loss: 8.009821891784668,global_loss: 0.1786188781261444\n",
      "Step 43: BSNE_Loss=12.2299,local_loss: 11.073354721069336,global_loss: 1.156516671180725\n",
      "Step 44: BSNE_Loss=12.1101,local_loss: 12.018486976623535,global_loss: 0.09163790941238403\n",
      "Step 45: BSNE_Loss=7.3960,local_loss: 7.268433094024658,global_loss: 0.12751907110214233\n",
      "Step 46: BSNE_Loss=13.5968,local_loss: 11.459362983703613,global_loss: 2.137441635131836\n",
      "Step 47: BSNE_Loss=9.4009,local_loss: 9.15732192993164,global_loss: 0.24359840154647827\n",
      "Step 48: BSNE_Loss=12.0835,local_loss: 11.526978492736816,global_loss: 0.5565458536148071\n",
      "Step 49: BSNE_Loss=8.1051,local_loss: 7.789872646331787,global_loss: 0.3152466416358948\n",
      "Step 50: BSNE_Loss=10.1755,local_loss: 9.960453987121582,global_loss: 0.21501769125461578\n",
      "Step 51: BSNE_Loss=8.8772,local_loss: 8.6171293258667,global_loss: 0.2601165175437927\n",
      "Step 52: BSNE_Loss=9.0078,local_loss: 8.882657051086426,global_loss: 0.1251230090856552\n",
      "Step 53: BSNE_Loss=11.0045,local_loss: 7.625459671020508,global_loss: 3.3790225982666016\n",
      "Step 54: BSNE_Loss=10.7155,local_loss: 10.165731430053711,global_loss: 0.5497772693634033\n",
      "Step 55: BSNE_Loss=10.5478,local_loss: 10.407772064208984,global_loss: 0.14002595841884613\n",
      "Step 56: BSNE_Loss=8.9863,local_loss: 8.6372652053833,global_loss: 0.3490428030490875\n",
      "Step 57: BSNE_Loss=11.0504,local_loss: 10.25698471069336,global_loss: 0.7934598922729492\n",
      "Step 58: BSNE_Loss=7.0302,local_loss: 6.84830379486084,global_loss: 0.1818707436323166\n",
      "Step 59: BSNE_Loss=8.9955,local_loss: 8.402199745178223,global_loss: 0.5932903289794922\n",
      "Step 60: BSNE_Loss=6.3934,local_loss: 6.392989635467529,global_loss: 0.00045928749023005366\n",
      "Step 61: BSNE_Loss=8.3399,local_loss: 7.523964881896973,global_loss: 0.8159530162811279\n",
      "Step 62: BSNE_Loss=8.0051,local_loss: 6.4898481369018555,global_loss: 1.5152575969696045\n",
      "Step 63: BSNE_Loss=8.9557,local_loss: 8.557653427124023,global_loss: 0.3980177640914917\n",
      "Step 64: BSNE_Loss=8.2415,local_loss: 7.883065223693848,global_loss: 0.35839542746543884\n",
      "Step 65: BSNE_Loss=7.1839,local_loss: 6.763992786407471,global_loss: 0.41989850997924805\n",
      "Step 66: BSNE_Loss=10.6838,local_loss: 10.640116691589355,global_loss: 0.04370595142245293\n",
      "Step 67: BSNE_Loss=7.5938,local_loss: 7.479599475860596,global_loss: 0.11417025327682495\n",
      "Step 68: BSNE_Loss=7.6328,local_loss: 7.392792224884033,global_loss: 0.23997800052165985\n",
      "Step 69: BSNE_Loss=7.4964,local_loss: 6.737505912780762,global_loss: 0.7588629126548767\n",
      "Step 70: BSNE_Loss=10.0656,local_loss: 9.317623138427734,global_loss: 0.7479827404022217\n",
      "Step 71: BSNE_Loss=5.5909,local_loss: 5.438900947570801,global_loss: 0.15202486515045166\n",
      "Step 72: BSNE_Loss=6.8550,local_loss: 6.773576259613037,global_loss: 0.08137484639883041\n",
      "Step 73: BSNE_Loss=8.4015,local_loss: 8.215202331542969,global_loss: 0.18625417351722717\n",
      "Step 74: BSNE_Loss=10.6316,local_loss: 10.616538047790527,global_loss: 0.015102522447705269\n",
      "Step 75: BSNE_Loss=7.8232,local_loss: 7.765382289886475,global_loss: 0.057776011526584625\n",
      "Step 76: BSNE_Loss=7.5366,local_loss: 7.433456897735596,global_loss: 0.10318970680236816\n",
      "Step 77: BSNE_Loss=7.4164,local_loss: 7.0986809730529785,global_loss: 0.31775355339050293\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 78: BSNE_Loss=10.2079,local_loss: 8.321745872497559,global_loss: 1.8861151933670044\n",
      "Step 79: BSNE_Loss=7.9509,local_loss: 7.929546356201172,global_loss: 0.021312804892659187\n",
      "Step 80: BSNE_Loss=7.6610,local_loss: 7.240152359008789,global_loss: 0.4208041727542877\n",
      "Step 81: BSNE_Loss=7.1069,local_loss: 6.874533653259277,global_loss: 0.2324145883321762\n",
      "Step 82: BSNE_Loss=10.6007,local_loss: 9.803083419799805,global_loss: 0.7976082563400269\n",
      "Step 83: BSNE_Loss=9.7326,local_loss: 7.940659046173096,global_loss: 1.7919671535491943\n",
      "Step 84: BSNE_Loss=6.5598,local_loss: 6.538547515869141,global_loss: 0.021218957379460335\n",
      "Step 85: BSNE_Loss=8.2039,local_loss: 6.234314441680908,global_loss: 1.9695919752120972\n",
      "Step 86: BSNE_Loss=8.0699,local_loss: 7.1972479820251465,global_loss: 0.8726450800895691\n",
      "Step 87: BSNE_Loss=8.1939,local_loss: 7.6071882247924805,global_loss: 0.5866622924804688\n",
      "Step 88: BSNE_Loss=6.2158,local_loss: 5.766279697418213,global_loss: 0.44951051473617554\n",
      "Step 89: BSNE_Loss=9.8838,local_loss: 8.662002563476562,global_loss: 1.221811294555664\n",
      "Step 90: BSNE_Loss=8.3007,local_loss: 6.411667346954346,global_loss: 1.8890635967254639\n",
      "Step 94: BSNE_Loss=7.3881,local_loss: 7.123732566833496,global_loss: 0.2643502950668335\n",
      "Step 95: BSNE_Loss=7.1936,local_loss: 7.045483589172363,global_loss: 0.1480703353881836\n",
      "Step 96: BSNE_Loss=14.0806,local_loss: 9.950218200683594,global_loss: 4.130401134490967\n",
      "Step 97: BSNE_Loss=5.6697,local_loss: 5.651712417602539,global_loss: 0.01800980605185032\n",
      "Step 98: BSNE_Loss=7.4593,local_loss: 7.262150287628174,global_loss: 0.19716569781303406\n",
      "Step 99: BSNE_Loss=7.7282,local_loss: 6.430309295654297,global_loss: 1.297896385192871\n",
      "Step 100: BSNE_Loss=6.8976,local_loss: 6.277106761932373,global_loss: 0.6205374598503113\n",
      "Step 101: BSNE_Loss=7.9226,local_loss: 7.919125556945801,global_loss: 0.0034252949990332127\n",
      "Step 102: BSNE_Loss=8.2540,local_loss: 8.25381088256836,global_loss: 0.00019933345902245492\n",
      "Step 103: BSNE_Loss=8.5789,local_loss: 7.667972087860107,global_loss: 0.9109210968017578\n",
      "Step 104: BSNE_Loss=8.3220,local_loss: 5.9085564613342285,global_loss: 2.413430690765381\n",
      "Step 105: BSNE_Loss=12.5605,local_loss: 10.753517150878906,global_loss: 1.8070236444473267\n",
      "Step 106: BSNE_Loss=7.0813,local_loss: 7.046225547790527,global_loss: 0.03507503122091293\n",
      "Step 107: BSNE_Loss=9.1218,local_loss: 6.893838882446289,global_loss: 2.2279465198516846\n",
      "Step 108: BSNE_Loss=7.3517,local_loss: 7.351090908050537,global_loss: 0.000618438352830708\n",
      "Step 109: BSNE_Loss=23.5150,local_loss: 22.898273468017578,global_loss: 0.6166877150535583\n",
      "Step 110: BSNE_Loss=19.3251,local_loss: 19.324481964111328,global_loss: 0.0006558374734595418\n",
      "Step 111: BSNE_Loss=25.7246,local_loss: 25.29656219482422,global_loss: 0.42806127667427063\n",
      "Step 112: BSNE_Loss=18.1245,local_loss: 18.11083984375,global_loss: 0.013709373772144318\n",
      "Step 113: BSNE_Loss=23.1770,local_loss: 23.04579734802246,global_loss: 0.13122372329235077\n",
      "Step 114: BSNE_Loss=24.4515,local_loss: 21.664113998413086,global_loss: 2.787376642227173\n",
      "Step 115: BSNE_Loss=22.1024,local_loss: 21.58976936340332,global_loss: 0.5126467943191528\n",
      "Step 116: BSNE_Loss=16.5042,local_loss: 16.18735694885254,global_loss: 0.31685662269592285\n",
      "Step 117: BSNE_Loss=14.5633,local_loss: 14.325568199157715,global_loss: 0.23769241571426392\n",
      "Step 118: BSNE_Loss=12.1981,local_loss: 12.022769927978516,global_loss: 0.17534543573856354\n",
      "Step 119: BSNE_Loss=14.2346,local_loss: 14.206653594970703,global_loss: 0.027919232845306396\n",
      "Step 120: BSNE_Loss=14.1256,local_loss: 13.966041564941406,global_loss: 0.15951190888881683\n",
      "Step 121: BSNE_Loss=11.1776,local_loss: 10.62026596069336,global_loss: 0.5573194026947021\n",
      "Step 122: BSNE_Loss=13.1006,local_loss: 12.865631103515625,global_loss: 0.23493289947509766\n",
      "Step 123: BSNE_Loss=14.8262,local_loss: 13.588948249816895,global_loss: 1.2372217178344727\n",
      "Step 124: BSNE_Loss=14.0417,local_loss: 12.927367210388184,global_loss: 1.114332675933838\n",
      "Step 125: BSNE_Loss=13.5117,local_loss: 12.966751098632812,global_loss: 0.544916033744812\n",
      "Step 126: BSNE_Loss=13.3870,local_loss: 13.19687557220459,global_loss: 0.1900985836982727\n",
      "Step 127: BSNE_Loss=16.0245,local_loss: 15.94047737121582,global_loss: 0.08406755328178406\n",
      "Step 128: BSNE_Loss=10.7440,local_loss: 10.683988571166992,global_loss: 0.060045938938856125\n",
      "Step 129: BSNE_Loss=12.3795,local_loss: 12.304428100585938,global_loss: 0.0750400722026825\n",
      "Step 130: BSNE_Loss=13.1431,local_loss: 13.14223575592041,global_loss: 0.0008440486271865666\n",
      "Step 131: BSNE_Loss=12.6943,local_loss: 12.534026145935059,global_loss: 0.16030488908290863\n",
      "Step 132: BSNE_Loss=13.8747,local_loss: 13.86015510559082,global_loss: 0.014557587914168835\n",
      "Step 133: BSNE_Loss=13.7047,local_loss: 13.491994857788086,global_loss: 0.21275003254413605\n",
      "Step 134: BSNE_Loss=14.3036,local_loss: 14.22949504852295,global_loss: 0.07409729808568954\n",
      "Step 135: BSNE_Loss=10.4364,local_loss: 10.411093711853027,global_loss: 0.025308722630143166\n",
      "Step 136: BSNE_Loss=10.8593,local_loss: 10.754077911376953,global_loss: 0.10524476319551468\n",
      "Step 137: BSNE_Loss=12.9568,local_loss: 12.954827308654785,global_loss: 0.001988421194255352\n",
      "Step 138: BSNE_Loss=10.1156,local_loss: 10.034728050231934,global_loss: 0.08087281137704849\n",
      "Step 139: BSNE_Loss=13.2640,local_loss: 12.735157012939453,global_loss: 0.5288444757461548\n",
      "Step 140: BSNE_Loss=14.8976,local_loss: 14.674775123596191,global_loss: 0.2227906435728073\n",
      "Step 141: BSNE_Loss=11.3770,local_loss: 11.31468391418457,global_loss: 0.06236463040113449\n",
      "Step 142: BSNE_Loss=10.3007,local_loss: 10.248685836791992,global_loss: 0.05205509811639786\n",
      "Step 143: BSNE_Loss=13.8727,local_loss: 13.865524291992188,global_loss: 0.007158830761909485\n",
      "Step 144: BSNE_Loss=13.1603,local_loss: 12.690322875976562,global_loss: 0.47000664472579956\n",
      "Step 145: BSNE_Loss=13.1052,local_loss: 12.830368041992188,global_loss: 0.27481386065483093\n",
      "Step 146: BSNE_Loss=10.9425,local_loss: 10.672537803649902,global_loss: 0.26997625827789307\n",
      "Step 147: BSNE_Loss=11.8805,local_loss: 11.859884262084961,global_loss: 0.0205810759216547\n",
      "Step 148: BSNE_Loss=12.7582,local_loss: 12.130538940429688,global_loss: 0.6276705861091614\n",
      "Step 149: BSNE_Loss=11.6809,local_loss: 11.325535774230957,global_loss: 0.3554092347621918\n",
      "Step 150: BSNE_Loss=12.3737,local_loss: 12.289985656738281,global_loss: 0.0837644413113594\n",
      "Step 151: BSNE_Loss=12.4081,local_loss: 12.274430274963379,global_loss: 0.1336560994386673\n",
      "Step 152: BSNE_Loss=13.9609,local_loss: 13.919251441955566,global_loss: 0.0416073352098465\n",
      "Step 153: BSNE_Loss=11.8753,local_loss: 11.833765029907227,global_loss: 0.04153065383434296\n",
      "Step 154: BSNE_Loss=12.5308,local_loss: 12.186928749084473,global_loss: 0.343913733959198\n",
      "Step 155: BSNE_Loss=11.6251,local_loss: 11.619941711425781,global_loss: 0.005110223311930895\n",
      "Step 156: BSNE_Loss=12.0356,local_loss: 11.92258358001709,global_loss: 0.113033227622509\n",
      "Step 157: BSNE_Loss=12.3113,local_loss: 12.2273530960083,global_loss: 0.08397150784730911\n",
      "Step 158: BSNE_Loss=11.1971,local_loss: 11.13621711730957,global_loss: 0.060843631625175476\n",
      "Step 159: BSNE_Loss=14.1495,local_loss: 13.484827995300293,global_loss: 0.6646549105644226\n",
      "Step 160: BSNE_Loss=12.1237,local_loss: 11.898280143737793,global_loss: 0.2254309207201004\n",
      "Step 161: BSNE_Loss=10.6278,local_loss: 10.625897407531738,global_loss: 0.0019455444999039173\n",
      "Step 162: BSNE_Loss=10.8961,local_loss: 10.734870910644531,global_loss: 0.1612718105316162\n",
      "Step 163: BSNE_Loss=13.4957,local_loss: 13.447258949279785,global_loss: 0.048464443534612656\n",
      "Step 164: BSNE_Loss=14.8482,local_loss: 14.311272621154785,global_loss: 0.5368981957435608\n",
      "Step 165: BSNE_Loss=11.7115,local_loss: 10.722410202026367,global_loss: 0.989043116569519\n",
      "Step 166: BSNE_Loss=15.1746,local_loss: 14.724242210388184,global_loss: 0.45033547282218933\n",
      "Step 167: BSNE_Loss=11.3962,local_loss: 11.385807991027832,global_loss: 0.01034518051892519\n",
      "Step 168: BSNE_Loss=10.7507,local_loss: 10.409276962280273,global_loss: 0.341457724571228\n",
      "Step 169: BSNE_Loss=13.7284,local_loss: 13.184505462646484,global_loss: 0.5438896417617798\n",
      "Step 170: BSNE_Loss=12.3273,local_loss: 11.620563507080078,global_loss: 0.7067834734916687\n",
      "Step 171: BSNE_Loss=12.6246,local_loss: 12.202459335327148,global_loss: 0.42212045192718506\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 172: BSNE_Loss=12.3644,local_loss: 12.351317405700684,global_loss: 0.013070955872535706\n",
      "Step 173: BSNE_Loss=11.5442,local_loss: 11.005666732788086,global_loss: 0.5384957790374756\n",
      "Step 174: BSNE_Loss=13.6860,local_loss: 13.684904098510742,global_loss: 0.0010683191940188408\n",
      "Step 175: BSNE_Loss=13.0736,local_loss: 13.007922172546387,global_loss: 0.0656905323266983\n",
      "Step 176: BSNE_Loss=14.0569,local_loss: 13.310150146484375,global_loss: 0.7467885613441467\n",
      "Step 177: BSNE_Loss=13.3985,local_loss: 13.290338516235352,global_loss: 0.10813570022583008\n",
      "Step 178: BSNE_Loss=13.9312,local_loss: 13.448126792907715,global_loss: 0.4831019639968872\n",
      "Step 179: BSNE_Loss=11.8116,local_loss: 11.555168151855469,global_loss: 0.2564670145511627\n",
      "Step 180: BSNE_Loss=10.9697,local_loss: 10.941492080688477,global_loss: 0.02819851040840149\n",
      "Step 181: BSNE_Loss=11.8689,local_loss: 10.618242263793945,global_loss: 1.250624418258667\n",
      "Step 182: BSNE_Loss=9.1875,local_loss: 9.097381591796875,global_loss: 0.09015299379825592\n",
      "Step 183: BSNE_Loss=10.9049,local_loss: 10.872758865356445,global_loss: 0.03217019885778427\n",
      "Step 184: BSNE_Loss=11.7476,local_loss: 11.637746810913086,global_loss: 0.1098175123333931\n",
      "Step 185: BSNE_Loss=11.4266,local_loss: 11.37348461151123,global_loss: 0.0531606487929821\n",
      "Step 186: BSNE_Loss=11.8512,local_loss: 11.178237915039062,global_loss: 0.6730024814605713\n",
      "Step 187: BSNE_Loss=12.7187,local_loss: 11.968795776367188,global_loss: 0.7499159574508667\n",
      "Step 188: BSNE_Loss=14.6739,local_loss: 14.0214204788208,global_loss: 0.6524837017059326\n",
      "Step 189: BSNE_Loss=12.5676,local_loss: 12.368006706237793,global_loss: 0.19956302642822266\n",
      "Step 190: BSNE_Loss=13.9375,local_loss: 13.490354537963867,global_loss: 0.4471895694732666\n",
      "Step 191: BSNE_Loss=10.5708,local_loss: 10.522282600402832,global_loss: 0.04854998365044594\n",
      "Step 192: BSNE_Loss=11.5128,local_loss: 11.431852340698242,global_loss: 0.0809345543384552\n",
      "Step 193: BSNE_Loss=10.3365,local_loss: 10.192525863647461,global_loss: 0.14397433400154114\n",
      "Step 194: BSNE_Loss=13.7197,local_loss: 13.380096435546875,global_loss: 0.33964163064956665\n",
      "Step 195: BSNE_Loss=11.5106,local_loss: 11.502761840820312,global_loss: 0.00782009493559599\n",
      "Step 196: BSNE_Loss=10.1069,local_loss: 10.05052375793457,global_loss: 0.056354768574237823\n",
      "Step 197: BSNE_Loss=11.5959,local_loss: 11.007229804992676,global_loss: 0.5886473059654236\n",
      "Step 198: BSNE_Loss=12.2510,local_loss: 11.283337593078613,global_loss: 0.9676340818405151\n",
      "Step 199: BSNE_Loss=11.3435,local_loss: 11.33919906616211,global_loss: 0.004297986160963774\n",
      "Center node 7572 finished at step 200 with loss 11.3435\n",
      "\n",
      "=== Pretraining Epoch 2 (Center Node: 7964) ===\n",
      "Step 0: BSNE_Loss=13.5473,local_loss: 13.23310375213623,global_loss: 0.31424158811569214\n",
      "Step 1: BSNE_Loss=12.8324,local_loss: 10.973640441894531,global_loss: 1.8587876558303833\n",
      "Step 2: BSNE_Loss=16.8370,local_loss: 16.414356231689453,global_loss: 0.422605037689209\n",
      "Step 3: BSNE_Loss=14.1952,local_loss: 13.840300559997559,global_loss: 0.3549407720565796\n",
      "Step 4: BSNE_Loss=15.3139,local_loss: 14.706122398376465,global_loss: 0.6077553033828735\n",
      "Step 5: BSNE_Loss=13.2967,local_loss: 13.295892715454102,global_loss: 0.0008185372571460903\n",
      "Step 6: BSNE_Loss=16.0693,local_loss: 15.328466415405273,global_loss: 0.7408336400985718\n",
      "Step 7: BSNE_Loss=11.1433,local_loss: 10.65872859954834,global_loss: 0.4845241606235504\n",
      "Step 8: BSNE_Loss=14.3773,local_loss: 14.371217727661133,global_loss: 0.006062581669539213\n",
      "Step 9: BSNE_Loss=11.2691,local_loss: 11.222493171691895,global_loss: 0.04658413678407669\n",
      "Step 10: BSNE_Loss=14.8577,local_loss: 14.51887321472168,global_loss: 0.3387854993343353\n",
      "Step 11: BSNE_Loss=14.3665,local_loss: 14.366007804870605,global_loss: 0.0004731596272904426\n",
      "Step 12: BSNE_Loss=12.2005,local_loss: 11.694330215454102,global_loss: 0.5062046051025391\n",
      "Step 13: BSNE_Loss=17.5873,local_loss: 16.933303833007812,global_loss: 0.654042661190033\n",
      "Step 14: BSNE_Loss=17.5566,local_loss: 17.536191940307617,global_loss: 0.020422527566552162\n",
      "Step 15: BSNE_Loss=15.7104,local_loss: 14.95096206665039,global_loss: 0.75947105884552\n",
      "Step 16: BSNE_Loss=12.5945,local_loss: 12.569872856140137,global_loss: 0.024650035426020622\n",
      "Step 17: BSNE_Loss=9.3179,local_loss: 9.280466079711914,global_loss: 0.03742494434118271\n",
      "Step 18: BSNE_Loss=14.1458,local_loss: 13.685611724853516,global_loss: 0.4601413905620575\n",
      "Step 19: BSNE_Loss=11.0353,local_loss: 10.778651237487793,global_loss: 0.256653368473053\n",
      "Step 20: BSNE_Loss=13.1059,local_loss: 12.666064262390137,global_loss: 0.4398709535598755\n",
      "Step 21: BSNE_Loss=17.5849,local_loss: 17.376102447509766,global_loss: 0.20881575345993042\n",
      "Step 22: BSNE_Loss=16.2233,local_loss: 16.057077407836914,global_loss: 0.16618819534778595\n",
      "Step 23: BSNE_Loss=16.9268,local_loss: 16.7590274810791,global_loss: 0.16778028011322021\n",
      "Step 24: BSNE_Loss=13.2783,local_loss: 12.510109901428223,global_loss: 0.7682182192802429\n",
      "Step 25: BSNE_Loss=15.5883,local_loss: 15.540785789489746,global_loss: 0.04748959466814995\n",
      "Step 26: BSNE_Loss=12.1166,local_loss: 11.857627868652344,global_loss: 0.25893279910087585\n",
      "Step 27: BSNE_Loss=15.0749,local_loss: 15.074302673339844,global_loss: 0.0005917003727518022\n",
      "Step 28: BSNE_Loss=9.2895,local_loss: 9.26793098449707,global_loss: 0.021557126194238663\n",
      "Step 29: BSNE_Loss=10.9861,local_loss: 10.669608116149902,global_loss: 0.31645506620407104\n",
      "Step 30: BSNE_Loss=16.3524,local_loss: 15.853647232055664,global_loss: 0.49873873591423035\n",
      "Step 31: BSNE_Loss=15.9386,local_loss: 15.388755798339844,global_loss: 0.5498409271240234\n",
      "Step 32: BSNE_Loss=13.1432,local_loss: 12.220282554626465,global_loss: 0.9229053854942322\n",
      "Step 33: BSNE_Loss=12.1762,local_loss: 11.956483840942383,global_loss: 0.21966873109340668\n",
      "Step 34: BSNE_Loss=17.8648,local_loss: 16.96788787841797,global_loss: 0.8969330191612244\n",
      "Step 35: BSNE_Loss=12.8062,local_loss: 12.648232460021973,global_loss: 0.15795154869556427\n",
      "Step 36: BSNE_Loss=12.8061,local_loss: 12.277181625366211,global_loss: 0.5288779735565186\n",
      "Step 37: BSNE_Loss=12.9816,local_loss: 12.276894569396973,global_loss: 0.7047274708747864\n",
      "Step 38: BSNE_Loss=13.6721,local_loss: 13.36264705657959,global_loss: 0.30948272347450256\n",
      "Step 39: BSNE_Loss=14.6052,local_loss: 14.59299373626709,global_loss: 0.012166861444711685\n",
      "Step 40: BSNE_Loss=13.6571,local_loss: 13.419086456298828,global_loss: 0.23805640637874603\n",
      "Step 41: BSNE_Loss=12.9578,local_loss: 12.331000328063965,global_loss: 0.626769483089447\n",
      "Step 42: BSNE_Loss=13.4754,local_loss: 13.47492504119873,global_loss: 0.0004841673653572798\n",
      "Step 43: BSNE_Loss=14.9129,local_loss: 14.130819320678711,global_loss: 0.7820828557014465\n",
      "Step 44: BSNE_Loss=14.2677,local_loss: 14.193761825561523,global_loss: 0.07395748794078827\n",
      "Step 45: BSNE_Loss=15.2836,local_loss: 15.0339937210083,global_loss: 0.24958473443984985\n",
      "Step 46: BSNE_Loss=14.2067,local_loss: 13.717564582824707,global_loss: 0.4891802966594696\n",
      "Step 47: BSNE_Loss=12.6662,local_loss: 12.246716499328613,global_loss: 0.41949400305747986\n",
      "Step 48: BSNE_Loss=10.8133,local_loss: 10.770179748535156,global_loss: 0.043148476630449295\n",
      "Step 49: BSNE_Loss=11.3816,local_loss: 11.299847602844238,global_loss: 0.08174902945756912\n",
      "Step 50: BSNE_Loss=12.2319,local_loss: 11.564902305603027,global_loss: 0.6669790744781494\n",
      "Step 51: BSNE_Loss=10.7787,local_loss: 10.641315460205078,global_loss: 0.1374243199825287\n",
      "Step 52: BSNE_Loss=15.1636,local_loss: 15.027822494506836,global_loss: 0.13581332564353943\n",
      "Step 53: BSNE_Loss=13.3799,local_loss: 13.101628303527832,global_loss: 0.27830082178115845\n",
      "Step 54: BSNE_Loss=13.5354,local_loss: 13.427281379699707,global_loss: 0.10808468610048294\n",
      "Step 55: BSNE_Loss=10.2215,local_loss: 9.853086471557617,global_loss: 0.3684108555316925\n",
      "Step 56: BSNE_Loss=13.5843,local_loss: 13.383829116821289,global_loss: 0.20051735639572144\n",
      "Step 57: BSNE_Loss=10.7772,local_loss: 10.156673431396484,global_loss: 0.6204805970191956\n",
      "Step 58: BSNE_Loss=13.7085,local_loss: 13.689498901367188,global_loss: 0.01904110796749592\n",
      "Step 59: BSNE_Loss=14.4173,local_loss: 12.246834754943848,global_loss: 2.170502185821533\n",
      "Step 60: BSNE_Loss=11.2659,local_loss: 11.228673934936523,global_loss: 0.03721430152654648\n",
      "Step 61: BSNE_Loss=15.7516,local_loss: 15.260376930236816,global_loss: 0.4912315905094147\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 62: BSNE_Loss=12.9394,local_loss: 11.781122207641602,global_loss: 1.1582942008972168\n",
      "Step 63: BSNE_Loss=9.2593,local_loss: 7.692985534667969,global_loss: 1.5662925243377686\n",
      "Step 64: BSNE_Loss=11.4201,local_loss: 10.725702285766602,global_loss: 0.6944171190261841\n",
      "Step 65: BSNE_Loss=10.1498,local_loss: 10.149229049682617,global_loss: 0.0005675728316418827\n",
      "Step 66: BSNE_Loss=11.7731,local_loss: 11.580086708068848,global_loss: 0.19297827780246735\n",
      "Step 67: BSNE_Loss=13.8330,local_loss: 13.800383567810059,global_loss: 0.03263016417622566\n",
      "Step 68: BSNE_Loss=14.7660,local_loss: 13.143917083740234,global_loss: 1.622117042541504\n",
      "Step 69: BSNE_Loss=11.5235,local_loss: 10.744214057922363,global_loss: 0.7792920470237732\n",
      "Step 70: BSNE_Loss=11.2987,local_loss: 10.739400863647461,global_loss: 0.5592823624610901\n",
      "Step 71: BSNE_Loss=13.1643,local_loss: 13.057225227355957,global_loss: 0.10709190368652344\n",
      "Step 72: BSNE_Loss=10.0742,local_loss: 10.051405906677246,global_loss: 0.02277177944779396\n",
      "Step 73: BSNE_Loss=12.3511,local_loss: 11.946017265319824,global_loss: 0.40503817796707153\n",
      "Step 74: BSNE_Loss=12.2161,local_loss: 12.036870956420898,global_loss: 0.17921182513237\n",
      "Step 75: BSNE_Loss=13.4833,local_loss: 13.424051284790039,global_loss: 0.05929052084684372\n",
      "Step 76: BSNE_Loss=13.7943,local_loss: 13.710234642028809,global_loss: 0.08408689498901367\n",
      "Step 77: BSNE_Loss=13.3053,local_loss: 13.285396575927734,global_loss: 0.019944895058870316\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[31m---------------------------------------------------------------------------\u001b[39m",
      "\u001b[31mKeyboardInterrupt\u001b[39m                         Traceback (most recent call last)",
      "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[3]\u001b[39m\u001b[32m, line 166\u001b[39m\n\u001b[32m    164\u001b[39m bp_node_features = []\n\u001b[32m    165\u001b[39m \u001b[38;5;28;01mfor\u001b[39;00m node_idx \u001b[38;5;129;01min\u001b[39;00m bp_nodes:\n\u001b[32m--> \u001b[39m\u001b[32m166\u001b[39m     node_subgraph = \u001b[43mcreate_node_subgraph\u001b[49m\u001b[43m(\u001b[49m\u001b[43mnode_idx\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mfeat_data\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43medge_indexs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdevice\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m    167\u001b[39m     _, node_feature = bsne_model([node_subgraph])\n\u001b[32m    168\u001b[39m     bp_node_features.append(node_feature.squeeze(\u001b[32m0\u001b[39m))\n",
      "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[1]\u001b[39m\u001b[32m, line 79\u001b[39m, in \u001b[36mcreate_node_subgraph\u001b[39m\u001b[34m(node_idx, feat_data, edge_indexs, device)\u001b[39m\n\u001b[32m     77\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mlen\u001b[39m(local_edges) > \u001b[32m0\u001b[39m \u001b[38;5;129;01mand\u001b[39;00m local_edges.size > \u001b[32m0\u001b[39m:\n\u001b[32m     78\u001b[39m     src_nodes = [node_map[src] \u001b[38;5;28;01mfor\u001b[39;00m src \u001b[38;5;129;01min\u001b[39;00m local_edges[\u001b[32m0\u001b[39m]]\n\u001b[32m---> \u001b[39m\u001b[32m79\u001b[39m     dst_nodes = \u001b[43m[\u001b[49m\u001b[43mnode_map\u001b[49m\u001b[43m[\u001b[49m\u001b[43mdst\u001b[49m\u001b[43m]\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mfor\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mdst\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;129;43;01min\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mlocal_edges\u001b[49m\u001b[43m[\u001b[49m\u001b[32;43m1\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m]\u001b[49m\n\u001b[32m     80\u001b[39m     edge_tensor = torch.tensor([src_nodes, dst_nodes], dtype=torch.long)\n\u001b[32m     81\u001b[39m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[32m     82\u001b[39m     \u001b[38;5;66;03m# 添加自环确保图不为空\u001b[39;00m\n",
      "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[1]\u001b[39m\u001b[32m, line 79\u001b[39m, in \u001b[36m<listcomp>\u001b[39m\u001b[34m(.0)\u001b[39m\n\u001b[32m     77\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mlen\u001b[39m(local_edges) > \u001b[32m0\u001b[39m \u001b[38;5;129;01mand\u001b[39;00m local_edges.size > \u001b[32m0\u001b[39m:\n\u001b[32m     78\u001b[39m     src_nodes = [node_map[src] \u001b[38;5;28;01mfor\u001b[39;00m src \u001b[38;5;129;01min\u001b[39;00m local_edges[\u001b[32m0\u001b[39m]]\n\u001b[32m---> \u001b[39m\u001b[32m79\u001b[39m     dst_nodes = [node_map[dst] \u001b[38;5;28;01mfor\u001b[39;00m dst \u001b[38;5;129;01min\u001b[39;00m local_edges[\u001b[32m1\u001b[39m]]\n\u001b[32m     80\u001b[39m     edge_tensor = torch.tensor([src_nodes, dst_nodes], dtype=torch.long)\n\u001b[32m     81\u001b[39m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[32m     82\u001b[39m     \u001b[38;5;66;03m# 添加自环确保图不为空\u001b[39;00m\n",
      "\u001b[31mKeyboardInterrupt\u001b[39m: "
     ]
    }
   ],
   "source": [
    "# Full end-to-end test (0707)\n",
    "\n",
    "\n",
    "# Hyperparameter settings\n",
    "args = {\n",
    "    \"dataset\": \"amazon\",\n",
    "#     \"dataset\": \"yelp\",\n",
    "    \"batch_size\": 8,\n",
    "    \"sample_size\": 50,  # NOTE(review): pretraining below uses a local sample_size = 60 instead -- confirm which is intended\n",
    "    \"weight_decay\": 0.00005,  # NOTE(review): both optimizers below hardcode weight_decay=5e-5 -- keep in sync\n",
    "    \"emb_size\": 32,\n",
    "    \"pretrain_epochs\": 50,\n",
    "    \"pretrain_lr\": 0.0005, # default: 0.0005\n",
    "    \"finetune_lr\": 0.0005,\n",
    "    \"num_epochs\": 200,\n",
    "    \"pretrain_patience\": 20,\n",
    "    \"patience\": 30,\n",
    "    \"tsne_weight\": 0.3,\n",
    "    \"weight\": 0.6,\n",
    "    \"layers\": 7,\n",
    "    \"test_size\": 0.6,\n",
    "    \"val_size\": 0.5,\n",
    "    \"layers_tree\": 7,\n",
    "    \"seed\": 76,\n",
    "    \"num_heads\": 4,\n",
    "    \"num_layers\": 2,\n",
    "    \"drop_rate\": 0.5\n",
    "}\n",
    "\n",
    "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "print(device)\n",
    "\n",
    "timestamp = time.strftime(\"%Y%m%d-%H%M%S\")\n",
    "writer = SummaryWriter(f'runs/{args[\"dataset\"]}_{timestamp}')\n",
    "\n",
    "print('loading data...')\n",
    "prefix = \"../../data/\"\n",
    "\n",
    "edge_indexs, feat_data, labels = load_data(args['dataset'], args['layers_tree'], prefix)\n",
    "\n",
    "# NOTE(review): torch's RNG is not seeded here, so model init stays non-deterministic -- confirm intended\n",
    "np.random.seed(args['seed'])\n",
    "rd.seed(args['seed'])\n",
    "\n",
    "if args['dataset'] == 'yelp':\n",
    "    index = list(range(len(labels)))\n",
    "    idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels, stratify=labels,\n",
    "                                                                    test_size=args['test_size'], random_state=2,\n",
    "                                                                    shuffle=True)\n",
    "    idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,\n",
    "                                                          stratify=y_train_val, test_size=args['val_size'],\n",
    "                                                          random_state=2, shuffle=True)\n",
    "    dist_path = os.path.join(prefix, \"YelpChi_shortest_distance.pkl\")\n",
    "elif args['dataset'] == 'amazon':\n",
    "    # First 3305 Amazon nodes are excluded from the split -- presumably unlabeled; verify against the dataset docs\n",
    "    index = list(range(3305, len(labels)))\n",
    "    idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels[3305:],\n",
    "                                                                    stratify=labels[3305:],\n",
    "                                                                    test_size=args['test_size'],\n",
    "                                                                    random_state=2, shuffle=True)\n",
    "    idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,\n",
    "                                                          stratify=y_train_val, test_size=args['val_size'],\n",
    "                                                          random_state=2, shuffle=True)\n",
    "    dist_path = os.path.join(prefix, \"Amazon_shortest_distance.pkl\")\n",
    "\n",
    "with open(dist_path, 'rb') as f:\n",
    "    dist_data = pickle.load(f)\n",
    "    dist_matrix = torch.tensor(dist_data['dist_matrix']).to(device)\n",
    "\n",
    "# Prepare the node feature matrix\n",
    "feat_data = torch.tensor(feat_data).float()\n",
    "# Min-max normalize features\n",
    "scaler = MinMaxScaler()\n",
    "feat_data = torch.tensor(scaler.fit_transform(feat_data)).float().to(device)\n",
    "\n",
    "# Initialize the BSNE transformer model\n",
    "bsne_model = BSNE_Transformer(\n",
    "    in_feat=feat_data.shape[1],\n",
    "    out_feat=2,\n",
    "    relation_nums=len(edge_indexs),\n",
    "    d_model=64,\n",
    "    nhead=args['num_heads'],\n",
    "    num_layers=args['num_layers'],\n",
    "    dim_feedforward=256,\n",
    "    drop_rate=args['drop_rate']\n",
    ").to(device)\n",
    "\n",
    "# Move edge indices to the target device\n",
    "for edge_index in edge_indexs:\n",
    "    edge_index[0] = edge_index[0].to(device)\n",
    "    edge_index[1] = [tensor.to(device) for tensor in edge_index[1]]\n",
    "\n",
    "all_local_losses = []      # per-epoch list of batch local losses\n",
    "all_global_losses = []     # per-epoch list of batch global losses\n",
    "all_bsne_losses = []       # per-epoch total BSNE loss\n",
    "epoch_avg_local = []       # per-epoch mean local loss\n",
    "epoch_avg_global = []      # per-epoch mean global loss\n",
    "    \n",
    "    \n",
    "print(\"\\n=== Starting Pretraining ===\")\n",
    "\n",
    "bsne_model.classifier.requires_grad_(False)\n",
    "optimizer = torch.optim.AdamW(\n",
    "    filter(lambda p: p.requires_grad, bsne_model.parameters()),\n",
    "    lr=args['pretrain_lr'],\n",
    "    weight_decay=5e-5\n",
    ")\n",
    "pretrain_best_loss = float('inf')\n",
    "pretrain_no_improve = 0\n",
    "pretrain_early_stop = False\n",
    "\n",
    "temperature = 0.3  # lower temperature -> sharper neighbor-sampling distribution\n",
    "loss_threshold = 0.4\n",
    "sample_size = 60\n",
    "max_steps_per_center = 200  # guard against an endless while-loop per center node\n",
    "max_epochs = 30\n",
    "center_indices = list(range(feat_data.shape[0]))\n",
    "# Shuffle the center-node order before the pretraining run\n",
    "rd.shuffle(center_indices)\n",
    "\n",
    "# Limit how many center nodes are used for pretraining\n",
    "center_indices = center_indices[:max_epochs]\n",
    "\n",
    "\n",
    "# Pretraining: for each sampled center node, minimize the local + global BSNE losses until converged\n",
    "for epoch, center_idx in enumerate(center_indices):\n",
    "    print(f\"\\n=== Pretraining Epoch {epoch} (Center Node: {center_idx}) ===\")\n",
    "    step = 0\n",
    "            \n",
    "    dist_row = dist_matrix[center_idx].cpu().numpy()\n",
    "#     dist_row = dist_matrix[center_idx].cpu()\n",
    "#     probs = np.power(10.0, -dist_row) # softmax; temperature widens the sampling differences    \n",
    "\n",
    "#     probs = torch.softmax(-dist_row/temperature)\n",
    "    probs = np.exp(-dist_row / temperature)\n",
    "    \n",
    "    # Build the Bp (distance-biased) node set\n",
    "    probs[center_idx] = 0\n",
    "#     print(probs.sum())\n",
    "#     probs /= probs.sum()\n",
    "    probs = probs / (probs.sum() + 1e-10) \n",
    "#     print(probs.sum())\n",
    "    available_nodes = len(dist_row) - 1\n",
    "    while True:\n",
    "        bsne_model.train()\n",
    "        optimizer.zero_grad()\n",
    "        \n",
    "        total_loss = 0.0  # reset at every step\n",
    "        eps = 1e-10\n",
    "        \n",
    "        actual_sample_size = min(sample_size, available_nodes)\n",
    "        # NOTE(review): if actual_sample_size == 0, bp_nodes/bu_nodes are never assigned and the loops below raise NameError -- confirm\n",
    "        if actual_sample_size > 0:\n",
    "            neighbors = np.random.choice(len(dist_row), size=actual_sample_size, p=probs, replace=False)\n",
    "            bp_nodes = neighbors.tolist()\n",
    "\n",
    "        # Build the Bu (uniformly sampled) node set\n",
    "        if actual_sample_size > 0:\n",
    "            neighbors = np.random.choice(len(dist_row), size=actual_sample_size, replace=False)\n",
    "            bu_nodes = neighbors.tolist()\n",
    "        \n",
    "        center_node_subgraph = create_node_subgraph(center_idx, feat_data, edge_indexs, device)\n",
    "        \n",
    "        _, center_feature = bsne_model([center_node_subgraph])\n",
    "        center_feature = center_feature.squeeze(0)\n",
    "        \n",
    "        # Compute embeddings for every node in the Bp set\n",
    "        bp_node_features = []\n",
    "        for node_idx in bp_nodes:\n",
    "            node_subgraph = create_node_subgraph(node_idx, feat_data, edge_indexs, device)\n",
    "            _, node_feature = bsne_model([node_subgraph])\n",
    "            bp_node_features.append(node_feature.squeeze(0))\n",
    "        bp_features = torch.stack(bp_node_features)\n",
    "        \n",
    "\n",
    "        # Compute embeddings for every node in the Bu set\n",
    "        bu_node_features = []\n",
    "        for node_idx in bu_nodes:\n",
    "            node_subgraph = create_node_subgraph(node_idx, feat_data, edge_indexs, device)\n",
    "            _, node_feature = bsne_model([node_subgraph])\n",
    "            bu_node_features.append(node_feature.squeeze(0))\n",
    "        bu_features = torch.stack(bu_node_features)\n",
    "        \n",
    "        # Compute the losses\n",
    "#         center_feature = bp_features[0]  # center node comes first\n",
    "\n",
    "        # Use only the other Bp nodes (center node excluded)\n",
    "#         other_bp_indices = bp_nodes\n",
    "#         other_bp_features = bp_features\n",
    "        \n",
    "#         P = probs[bp_nodes]\n",
    "        P = torch.tensor(probs[bp_nodes], device=device, dtype=torch.float32)\n",
    "\n",
    "        # Compute the Q vector (Euclidean distances)\n",
    "        feat_dists_bp = torch.cdist(center_feature.unsqueeze(0), bp_features).squeeze(0)\n",
    "        Q = torch.softmax(-feat_dists_bp, dim=0)\n",
    "        \n",
    "#         log_ratio = torch.log(P + eps) - torch.log(Q + eps)\n",
    "        # Original local-loss formula (disabled)\n",
    "#         log_ratio = torch.log(((P + eps)/(Q + eps)))\n",
    "#         loss_local = log_ratio.mean() \n",
    "\n",
    "        # Compute the Q vector via dot product (disabled alternative)\n",
    "#         center_norm = F.normalize(center_feature, dim=0)\n",
    "#         other_norm = F.normalize(other_bp_features, dim=1)\n",
    "#         feat_dists_bp = center_norm @ other_norm.t()\n",
    "# #         Q = torch.softmax(feat_dists_bp.squeeze(0), dim=0)\n",
    "#         Q = feat_dists_bp/feat_dists_bp.sum()\n",
    "    \n",
    "#         M = 0.5 * (P + Q)\n",
    "#         loss_local = 0.5 * (P * torch.log(P/M) + Q * torch.log(Q/M)).sum()\n",
    "#         log_ratio = torch.abs(torch.log(P / (Q + eps)))\n",
    "        log_ratio = (torch.log(P/Q))**2\n",
    "        loss_local = log_ratio.mean() \n",
    "\n",
    "\n",
    "        # Global distances and probabilities (disabled variant)\n",
    "#         all_dists = dist_matrix[center_idx].cpu().numpy()\n",
    "# #         unnorm_probs_global = np.exp(-all_dists)\n",
    "#         unnorm_probs_global = np.power(10.0, -all_dists)\n",
    "#         unnorm_probs_global[center_idx] = 0  # exclude self\n",
    "\n",
    "#         # global normalization constant\n",
    "#         Z_global = unnorm_probs_global.sum() + eps\n",
    "\n",
    "#         # sum of global probabilities over the B_p nodes\n",
    "#         bp_global_prob_sum = unnorm_probs_global[bp_nodes].sum() / Z_global\n",
    "\n",
    "#         # compute k_Bp\n",
    "#         N = dist_matrix.shape[0]\n",
    "#         k_Bp = bp_global_prob_sum * (N / len(bp_nodes))\n",
    "\n",
    "#         bu_features_ = bu_features[1:]  # exclude the center node\n",
    "        feat_dists_bu = torch.cdist(center_feature.unsqueeze(0), bu_features).squeeze(0)\n",
    "        sum_e_bu = torch.exp(-feat_dists_bu).sum()\n",
    "        sum_e_bp = torch.exp(-feat_dists_bp).sum()\n",
    "        \n",
    "        global_ratio = (sum_e_bu / (sum_e_bp + eps))\n",
    "        \n",
    "        \n",
    "#         loss_global = torch.abs(torch.log(global_ratio))\n",
    "        # Squared log-ratio variant of the global loss\n",
    "        loss_global = (torch.log(global_ratio.clamp(min=eps, max=1e10)))**2\n",
    "    \n",
    "        total_loss += loss_local + loss_global\n",
    "    \n",
    "        total_loss.backward()\n",
    "        optimizer.step()\n",
    "        \n",
    "        \n",
    "#         for name, parms in bsne_model.named_parameters():\n",
    "#             grad_value = parms.grad\n",
    "#             if grad_value is not None:\n",
    "#                 grad_mean = torch.mean(grad_value)\n",
    "#             else:\n",
    "#                 grad_mean = \"No gradient\"\n",
    "\n",
    "#             print(f'-->name: {name} '\n",
    "#                   f'--weight: {torch.mean(parms.data).item():.6f} '\n",
    "#                   f'-->grad_value: {grad_mean}')\n",
    "        \n",
    "#         print(\"P:\",P)\n",
    "#         print(\"Q:\",Q)\n",
    "        print(f\"Step {step}: BSNE_Loss={total_loss.item():.4f},local_loss: {loss_local.item()},global_loss: {loss_global.item()}\")\n",
    "        step += 1\n",
    "        \n",
    "        if total_loss.item() < loss_threshold or step >= max_steps_per_center:\n",
    "            print(f\"Center node {center_idx} finished at step {step} with loss {total_loss.item():.4f}\")\n",
    "            break\n",
    "\n",
    "# Fine-tuning: supervised classification with the pretrained encoder\n",
    "print(\"\\n=== Starting Fine-tuning ===\")\n",
    "bsne_model.classifier.requires_grad_(True)\n",
    "optimizer = torch.optim.AdamW(\n",
    "    bsne_model.parameters(),\n",
    "    lr=args['finetune_lr'],\n",
    "    weight_decay=5e-5\n",
    ")\n",
    "# NOTE(review): ReduceLROnPlateau's `verbose` kwarg is deprecated in recent torch releases -- confirm the installed version\n",
    "scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(\n",
    "    optimizer, mode='max', factor=0.5, patience=10, verbose=True\n",
    ")\n",
    "\n",
    "best_val_auc = 0.0\n",
    "best_model_state = None\n",
    "train_pos, train_neg = pos_neg_split(idx_train, y_train)\n",
    "\n",
    "no_improve_epochs = 0\n",
    "early_stop = False\n",
    "\n",
    "for epoch in range(args['num_epochs']):\n",
    "    if early_stop:\n",
    "        break\n",
    "\n",
    "    bsne_model.train()\n",
    "    total_loss = 0.0\n",
    "\n",
    "    # Sample center nodes for this batch\n",
    "    batch_centers = rd.sample(train_pos + train_neg, args['batch_size'])\n",
    "    subgraph_data = []\n",
    "\n",
    "    for xi in batch_centers:\n",
    "        subgraph = create_node_subgraph(xi, feat_data, edge_indexs, device)\n",
    "        #         print(len(subgraph['features']))\n",
    "        subgraph_data.append(subgraph)\n",
    "\n",
    "    optimizer.zero_grad()\n",
    "\n",
    "    center_logits, _ = bsne_model(subgraph_data)  # [B, 2]\n",
    "\n",
    "    # Labels of the sampled center nodes\n",
    "    labels_center = torch.tensor([labels[xi] for xi in batch_centers]).to(device).long()\n",
    "\n",
    "    # Classification loss (nll_loss expects log-probabilities from the model)\n",
    "    cls_loss = F.nll_loss(center_logits, labels_center)\n",
    "\n",
    "    cls_loss.backward()\n",
    "    optimizer.step()\n",
    "\n",
    "    total_loss += cls_loss.item()\n",
    "\n",
    "    avg_loss = total_loss / args['batch_size']\n",
    "    writer.add_scalar('FineTune/Train_Loss', avg_loss, epoch)\n",
    "\n",
    "    if epoch % 5 == 0:\n",
    "        val_auc, val_ap, val_f1, val_g_mean = test(idx_val, y_val, bsne_model, feat_data, edge_indexs, device)\n",
    "\n",
    "        writer.add_scalar('Validation/AUC', val_auc, epoch)\n",
    "        writer.add_scalar('Validation/F1', val_f1, epoch)\n",
    "        writer.add_scalar('Validation/GMean', val_g_mean, epoch)\n",
    "\n",
    "        print(f'Epoch: {epoch:03d} | Loss: {avg_loss:.4f} | Val AUC: {val_auc:.4f} | Val F1: {val_f1:.4f}')\n",
    "\n",
    "        scheduler.step(val_auc)\n",
    "\n",
    "        if val_auc > best_val_auc:\n",
    "            best_val_auc = val_auc\n",
    "            no_improve_epochs = 0\n",
    "            best_model_state = copy.deepcopy(bsne_model.state_dict())\n",
    "        else:\n",
    "            no_improve_epochs += 1\n",
    "\n",
    "        if no_improve_epochs >= args['patience']:\n",
    "            print(f\"Early stopping at epoch {epoch}\")\n",
    "            early_stop = True\n",
    "\n",
    "# Load the best checkpoint\n",
    "# NOTE(review): best_model_state stays None if validation never improved; load_state_dict would then fail -- confirm\n",
    "bsne_model.load_state_dict(best_model_state)\n",
    "\n",
    "# Final evaluation on the held-out test set\n",
    "test_auc, test_ap, test_f1, test_g_mean = test(idx_test, y_test, bsne_model, feat_data, edge_indexs, device)\n",
    "print(f'\\n=== Final Test Results ===')\n",
    "print(f'Test AUC: {test_auc:.4f} | Test AP: {test_ap:.4f} | Test F1: {test_f1:.4f} | G-mean: {test_g_mean:.4f}')\n",
    "writer.close()\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ac5746d9",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "15bfb7b4",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "bp_features: tensor([[ 1.5379e-01, -2.5115e-01, -1.4835e+00,  ..., -3.8957e+00,\n",
      "         -3.1473e-01,  1.0758e-01],\n",
      "        [ 5.9252e-01, -1.2292e-02, -3.0347e-01,  ..., -2.4255e+00,\n",
      "         -6.3382e-04, -2.3526e-01],\n",
      "        [-1.4551e-01,  4.6990e-02,  6.0272e-03,  ..., -2.2072e+00,\n",
      "         -4.1145e-01,  3.6693e-01],\n",
      "        ...,\n",
      "        [-4.5456e-01, -1.9187e-01, -6.8510e-02,  ..., -1.5710e+00,\n",
      "         -6.7994e-01,  8.8107e-01],\n",
      "        [-6.9214e-02, -9.3351e-01, -1.5648e-01,  ..., -3.2156e+00,\n",
      "          3.9368e-01,  1.2101e+00],\n",
      "        [ 4.1762e-02, -3.7311e-01, -1.0503e-01,  ...,  1.5461e-01,\n",
      "         -4.5418e-01,  4.0897e-02]], device='cuda:0', grad_fn=<StackBackward0>)\n",
      "bu_features: tensor([[-0.0184, -0.5900, -0.7437,  ..., -1.0337,  1.4888, -1.1678],\n",
      "        [ 0.3477,  0.5557, -0.7436,  ...,  0.1818, -0.5751, -0.3329],\n",
      "        [ 0.0310,  0.8537, -1.4270,  ..., -0.1282, -0.2549,  0.1970],\n",
      "        ...,\n",
      "        [ 1.7768, -0.5098,  0.2406,  ..., -0.1706, -0.4174,  0.7792],\n",
      "        [ 0.1729,  0.1236, -0.6441,  ..., -3.1630, -0.0760, -0.6291],\n",
      "        [-0.6186,  1.7482, -0.7534,  ..., -1.9393,  0.1020,  0.0648]],\n",
      "       device='cuda:0', grad_fn=<StackBackward0>)\n"
     ]
    },
    {
     "ename": "NameError",
     "evalue": "name 'orig_dists_bp' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001b[31m---------------------------------------------------------------------------\u001b[39m",
      "\u001b[31mNameError\u001b[39m                                 Traceback (most recent call last)",
      "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[4]\u001b[39m\u001b[32m, line 3\u001b[39m\n\u001b[32m      1\u001b[39m \u001b[38;5;28mprint\u001b[39m(\u001b[33m\"\u001b[39m\u001b[33mbp_features:\u001b[39m\u001b[33m\"\u001b[39m,bp_features)\n\u001b[32m      2\u001b[39m \u001b[38;5;28mprint\u001b[39m(\u001b[33m\"\u001b[39m\u001b[33mbu_features:\u001b[39m\u001b[33m\"\u001b[39m,bu_features)\n\u001b[32m----> \u001b[39m\u001b[32m3\u001b[39m \u001b[38;5;28mprint\u001b[39m(\u001b[33m\"\u001b[39m\u001b[33morig_dists_bp:\u001b[39m\u001b[33m\"\u001b[39m,\u001b[43morig_dists_bp\u001b[49m)\n\u001b[32m      4\u001b[39m \u001b[38;5;28mprint\u001b[39m(\u001b[33m\"\u001b[39m\u001b[33mP:\u001b[39m\u001b[33m\"\u001b[39m,P)\n\u001b[32m      5\u001b[39m \u001b[38;5;28mprint\u001b[39m(\u001b[33m\"\u001b[39m\u001b[33mQ:\u001b[39m\u001b[33m\"\u001b[39m,Q)\n",
      "\u001b[31mNameError\u001b[39m: name 'orig_dists_bp' is not defined"
     ]
    }
   ],
   "source": [
    "# Debug cell: inspect intermediate tensors left in the kernel by the pretraining cell above\n",
    "# NOTE(review): relies on kernel state -- fails under Restart & Run All if the pretraining cell was interrupted\n",
    "print(\"probs:\",probs)\n",
    "print(\"center_feature:\",center_feature)\n",
    "print(\"bp_features:\",bp_features)\n",
    "print(\"bu_features:\",bu_features)\n",
    "\n",
    "print(\"P:\",P)\n",
    "print(\"Q:\",Q)\n",
    "\n",
    "print(\"feat_dists_bp:\",feat_dists_bp)\n",
    "print(\"feat_dists_bu:\",feat_dists_bu)\n",
    "print(\"sum_e_bp:\",sum_e_bp)\n",
    "print(\"sum_e_bu:\",sum_e_bu)\n",
    "print(\"global_ratio:\",global_ratio)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2e87fc62",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "fb44953e",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor(1.0000)"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Sanity check: the sampling probabilities should sum to 1\n",
    "probs.sum()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "069c1034",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "-->name: feature_proj.0.weight -->grad_requirs: True --weight: 0.008665 -->grad_value: No gradient\n",
      "-->name: feature_proj.0.bias -->grad_requirs: True --weight: 0.010582 -->grad_value: No gradient\n",
      "-->name: feature_proj.1.weight -->grad_requirs: True --weight: 0.994209 -->grad_value: No gradient\n",
      "-->name: feature_proj.1.bias -->grad_requirs: True --weight: -0.012147 -->grad_value: No gradient\n",
      "-->name: norm.weight -->grad_requirs: True --weight: 0.996044 -->grad_value: No gradient\n",
      "-->name: norm.bias -->grad_requirs: True --weight: -0.003284 -->grad_value: No gradient\n",
      "-->name: relation_encoders.0.layers.0.self_attn.in_proj_weight -->grad_requirs: True --weight: 0.001154 -->grad_value: No gradient\n",
      "-->name: relation_encoders.0.layers.0.self_attn.in_proj_bias -->grad_requirs: True --weight: 0.000770 -->grad_value: No gradient\n",
      "-->name: relation_encoders.0.layers.0.self_attn.out_proj.weight -->grad_requirs: True --weight: 0.000251 -->grad_value: No gradient\n",
      "-->name: relation_encoders.0.layers.0.self_attn.out_proj.bias -->grad_requirs: True --weight: 0.002110 -->grad_value: No gradient\n",
      "-->name: relation_encoders.0.layers.0.linear1.weight -->grad_requirs: True --weight: 0.000633 -->grad_value: No gradient\n",
      "-->name: relation_encoders.0.layers.0.linear1.bias -->grad_requirs: True --weight: -0.007763 -->grad_value: No gradient\n",
      "-->name: relation_encoders.0.layers.0.linear2.weight -->grad_requirs: True --weight: 0.000221 -->grad_value: No gradient\n",
      "-->name: relation_encoders.0.layers.0.linear2.bias -->grad_requirs: True --weight: 0.005467 -->grad_value: No gradient\n",
      "-->name: relation_encoders.0.layers.0.norm1.weight -->grad_requirs: True --weight: 1.003334 -->grad_value: No gradient\n",
      "-->name: relation_encoders.0.layers.0.norm1.bias -->grad_requirs: True --weight: -0.006549 -->grad_value: No gradient\n",
      "-->name: relation_encoders.0.layers.0.norm2.weight -->grad_requirs: True --weight: 1.000152 -->grad_value: No gradient\n",
      "-->name: relation_encoders.0.layers.0.norm2.bias -->grad_requirs: True --weight: -0.001157 -->grad_value: No gradient\n",
      "-->name: relation_encoders.0.layers.1.self_attn.in_proj_weight -->grad_requirs: True --weight: -0.001489 -->grad_value: No gradient\n",
      "-->name: relation_encoders.0.layers.1.self_attn.in_proj_bias -->grad_requirs: True --weight: -0.003456 -->grad_value: No gradient\n",
      "-->name: relation_encoders.0.layers.1.self_attn.out_proj.weight -->grad_requirs: True --weight: -0.000381 -->grad_value: No gradient\n",
      "-->name: relation_encoders.0.layers.1.self_attn.out_proj.bias -->grad_requirs: True --weight: -0.005391 -->grad_value: No gradient\n",
      "-->name: relation_encoders.0.layers.1.linear1.weight -->grad_requirs: True --weight: -0.000333 -->grad_value: No gradient\n",
      "-->name: relation_encoders.0.layers.1.linear1.bias -->grad_requirs: True --weight: -0.014522 -->grad_value: No gradient\n",
      "-->name: relation_encoders.0.layers.1.linear2.weight -->grad_requirs: True --weight: 0.000884 -->grad_value: No gradient\n",
      "-->name: relation_encoders.0.layers.1.linear2.bias -->grad_requirs: True --weight: 0.008839 -->grad_value: No gradient\n",
      "-->name: relation_encoders.0.layers.1.norm1.weight -->grad_requirs: True --weight: 1.000222 -->grad_value: No gradient\n",
      "-->name: relation_encoders.0.layers.1.norm1.bias -->grad_requirs: True --weight: -0.004946 -->grad_value: No gradient\n",
      "-->name: relation_encoders.0.layers.1.norm2.weight -->grad_requirs: True --weight: 0.988574 -->grad_value: No gradient\n",
      "-->name: relation_encoders.0.layers.1.norm2.bias -->grad_requirs: True --weight: 0.001514 -->grad_value: No gradient\n",
      "-->name: relation_encoders.1.layers.0.self_attn.in_proj_weight -->grad_requirs: True --weight: 0.001059 -->grad_value: No gradient\n",
      "-->name: relation_encoders.1.layers.0.self_attn.in_proj_bias -->grad_requirs: True --weight: -0.002641 -->grad_value: No gradient\n",
      "-->name: relation_encoders.1.layers.0.self_attn.out_proj.weight -->grad_requirs: True --weight: -0.001959 -->grad_value: No gradient\n",
      "-->name: relation_encoders.1.layers.0.self_attn.out_proj.bias -->grad_requirs: True --weight: 0.004741 -->grad_value: No gradient\n",
      "-->name: relation_encoders.1.layers.0.linear1.weight -->grad_requirs: True --weight: 0.000244 -->grad_value: No gradient\n",
      "-->name: relation_encoders.1.layers.0.linear1.bias -->grad_requirs: True --weight: -0.014904 -->grad_value: No gradient\n",
      "-->name: relation_encoders.1.layers.0.linear2.weight -->grad_requirs: True --weight: -0.003943 -->grad_value: No gradient\n",
      "-->name: relation_encoders.1.layers.0.linear2.bias -->grad_requirs: True --weight: -0.000373 -->grad_value: No gradient\n",
      "-->name: relation_encoders.1.layers.0.norm1.weight -->grad_requirs: True --weight: 1.002805 -->grad_value: No gradient\n",
      "-->name: relation_encoders.1.layers.0.norm1.bias -->grad_requirs: True --weight: 0.000757 -->grad_value: No gradient\n",
      "-->name: relation_encoders.1.layers.0.norm2.weight -->grad_requirs: True --weight: 1.001122 -->grad_value: No gradient\n",
      "-->name: relation_encoders.1.layers.0.norm2.bias -->grad_requirs: True --weight: -0.005578 -->grad_value: No gradient\n",
      "-->name: relation_encoders.1.layers.1.self_attn.in_proj_weight -->grad_requirs: True --weight: 0.001208 -->grad_value: No gradient\n",
      "-->name: relation_encoders.1.layers.1.self_attn.in_proj_bias -->grad_requirs: True --weight: -0.000842 -->grad_value: No gradient\n",
      "-->name: relation_encoders.1.layers.1.self_attn.out_proj.weight -->grad_requirs: True --weight: -0.002608 -->grad_value: No gradient\n",
      "-->name: relation_encoders.1.layers.1.self_attn.out_proj.bias -->grad_requirs: True --weight: 0.000255 -->grad_value: No gradient\n",
      "-->name: relation_encoders.1.layers.1.linear1.weight -->grad_requirs: True --weight: -0.000620 -->grad_value: No gradient\n",
      "-->name: relation_encoders.1.layers.1.linear1.bias -->grad_requirs: True --weight: -0.009980 -->grad_value: No gradient\n",
      "-->name: relation_encoders.1.layers.1.linear2.weight -->grad_requirs: True --weight: 0.000296 -->grad_value: No gradient\n",
      "-->name: relation_encoders.1.layers.1.linear2.bias -->grad_requirs: True --weight: 0.000464 -->grad_value: No gradient\n",
      "-->name: relation_encoders.1.layers.1.norm1.weight -->grad_requirs: True --weight: 0.999484 -->grad_value: No gradient\n",
      "-->name: relation_encoders.1.layers.1.norm1.bias -->grad_requirs: True --weight: 0.012806 -->grad_value: No gradient\n",
      "-->name: relation_encoders.1.layers.1.norm2.weight -->grad_requirs: True --weight: 1.025239 -->grad_value: No gradient\n",
      "-->name: relation_encoders.1.layers.1.norm2.bias -->grad_requirs: True --weight: -0.002184 -->grad_value: No gradient\n",
      "-->name: relation_encoders.2.layers.0.self_attn.in_proj_weight -->grad_requirs: True --weight: 0.001369 -->grad_value: No gradient\n",
      "-->name: relation_encoders.2.layers.0.self_attn.in_proj_bias -->grad_requirs: True --weight: -0.000463 -->grad_value: No gradient\n",
      "-->name: relation_encoders.2.layers.0.self_attn.out_proj.weight -->grad_requirs: True --weight: 0.000816 -->grad_value: No gradient\n",
      "-->name: relation_encoders.2.layers.0.self_attn.out_proj.bias -->grad_requirs: True --weight: 0.003603 -->grad_value: No gradient\n",
      "-->name: relation_encoders.2.layers.0.linear1.weight -->grad_requirs: True --weight: 0.000447 -->grad_value: No gradient\n",
      "-->name: relation_encoders.2.layers.0.linear1.bias -->grad_requirs: True --weight: -0.000787 -->grad_value: No gradient\n",
      "-->name: relation_encoders.2.layers.0.linear2.weight -->grad_requirs: True --weight: -0.001296 -->grad_value: No gradient\n",
      "-->name: relation_encoders.2.layers.0.linear2.bias -->grad_requirs: True --weight: 0.000606 -->grad_value: No gradient\n",
      "-->name: relation_encoders.2.layers.0.norm1.weight -->grad_requirs: True --weight: 0.997347 -->grad_value: No gradient\n",
      "-->name: relation_encoders.2.layers.0.norm1.bias -->grad_requirs: True --weight: -0.000370 -->grad_value: No gradient\n",
      "-->name: relation_encoders.2.layers.0.norm2.weight -->grad_requirs: True --weight: 0.999627 -->grad_value: No gradient\n",
      "-->name: relation_encoders.2.layers.0.norm2.bias -->grad_requirs: True --weight: 0.001037 -->grad_value: No gradient\n",
      "-->name: relation_encoders.2.layers.1.self_attn.in_proj_weight -->grad_requirs: True --weight: -0.000457 -->grad_value: No gradient\n",
      "-->name: relation_encoders.2.layers.1.self_attn.in_proj_bias -->grad_requirs: True --weight: -0.004389 -->grad_value: No gradient\n",
      "-->name: relation_encoders.2.layers.1.self_attn.out_proj.weight -->grad_requirs: True --weight: 0.000166 -->grad_value: No gradient\n",
      "-->name: relation_encoders.2.layers.1.self_attn.out_proj.bias -->grad_requirs: True --weight: -0.000464 -->grad_value: No gradient\n",
      "-->name: relation_encoders.2.layers.1.linear1.weight -->grad_requirs: True --weight: 0.000110 -->grad_value: No gradient\n",
      "-->name: relation_encoders.2.layers.1.linear1.bias -->grad_requirs: True --weight: -0.002366 -->grad_value: No gradient\n",
      "-->name: relation_encoders.2.layers.1.linear2.weight -->grad_requirs: True --weight: 0.001228 -->grad_value: No gradient\n",
      "-->name: relation_encoders.2.layers.1.linear2.bias -->grad_requirs: True --weight: 0.005763 -->grad_value: No gradient\n",
      "-->name: relation_encoders.2.layers.1.norm1.weight -->grad_requirs: True --weight: 0.999454 -->grad_value: No gradient\n",
      "-->name: relation_encoders.2.layers.1.norm1.bias -->grad_requirs: True --weight: -0.000763 -->grad_value: No gradient\n",
      "-->name: relation_encoders.2.layers.1.norm2.weight -->grad_requirs: True --weight: 0.995095 -->grad_value: No gradient\n",
      "-->name: relation_encoders.2.layers.1.norm2.bias -->grad_requirs: True --weight: -0.002613 -->grad_value: No gradient\n",
      "-->name: classifier.0.weight -->grad_requirs: False --weight: -0.000072 -->grad_value: No gradient\n",
      "-->name: classifier.0.bias -->grad_requirs: False --weight: 0.001129 -->grad_value: No gradient\n",
      "-->name: classifier.3.weight -->grad_requirs: False --weight: -0.001398 -->grad_value: No gradient\n",
      "-->name: classifier.3.bias -->grad_requirs: False --weight: -0.008921 -->grad_value: No gradient\n"
     ]
    }
   ],
   "source": [
    "# Inspect gradient flow: for every model parameter report whether it\n",
    "# requires grad, the mean of its weights, and the mean of its gradient.\n",
    "for name, parms in bsne_model.named_parameters():\n",
    "    grad_value = parms.grad\n",
    "    if grad_value is not None:\n",
    "        # FIX: report a scalar instead of dumping the full tensor repr\n",
    "        grad_mean = torch.mean(grad_value).item()\n",
    "    else:\n",
    "        grad_mean = \"No gradient\"\n",
    "\n",
    "    # FIX: label typo 'grad_requirs' -> 'grad_requires'\n",
    "    print(f'-->name: {name} '\n",
    "          f'-->grad_requires: {parms.requires_grad} '\n",
    "          f'--weight: {torch.mean(parms.data).item():.6f} '\n",
    "          f'-->grad_value: {grad_mean}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "adc2ddbd",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "bp_features: tensor([[-0.5702,  1.3168, -0.1787,  ...,  0.7632,  0.0429, -0.1725],\n",
      "        [ 0.1251, -1.1670,  0.3653,  ...,  1.0615, -0.5137,  0.9988],\n",
      "        [-1.0533, -0.3202,  0.4615,  ...,  1.0612, -0.6082,  0.9952],\n",
      "        ...,\n",
      "        [-0.3784, -0.1971,  0.5413,  ...,  0.5937, -0.9628, -0.3055],\n",
      "        [-0.8808, -0.1173,  0.2112,  ..., -1.0315, -1.3149,  0.8907],\n",
      "        [-2.3371,  0.6095, -0.0873,  ...,  0.0804, -0.5554,  0.6815]],\n",
      "       device='cuda:0', grad_fn=<StackBackward0>)\n",
      "bu_features: tensor([[-0.2823,  0.3090,  0.5724,  ...,  0.4419, -1.0853, -1.1917],\n",
      "        [-0.2784, -0.3072,  0.2529,  ...,  0.5045, -0.9009, -1.1452],\n",
      "        [-1.5317, -0.4242,  0.1296,  ..., -0.1155,  0.4415,  0.5392],\n",
      "        ...,\n",
      "        [ 0.7232, -2.5266,  0.5857,  ...,  0.1232, -0.4201,  0.9106],\n",
      "        [ 0.3383, -0.7859,  0.3099,  ...,  0.0577, -0.6736, -1.0603],\n",
      "        [ 0.0080,  0.0586,  0.5155,  ...,  0.1816, -0.3376, -0.1120]],\n",
      "       device='cuda:0', grad_fn=<StackBackward0>)\n",
      "orig_dists_bp: [2 1 1 1 2 1 1 1 2 1 2 2 1 1 2 2 1 1 1 1 2 1 1 2 1 1 2 2 2 2 1 1 2 2 2 1 1\n",
      " 1 1 2]\n",
      "P: tensor([0.0015, 0.0424, 0.0424, 0.0424, 0.0015, 0.0424, 0.0424, 0.0424, 0.0015,\n",
      "        0.0424, 0.0015, 0.0015, 0.0424, 0.0424, 0.0015, 0.0015, 0.0424, 0.0424,\n",
      "        0.0424, 0.0424, 0.0015, 0.0424, 0.0424, 0.0015, 0.0424, 0.0424, 0.0015,\n",
      "        0.0015, 0.0015, 0.0015, 0.0424, 0.0424, 0.0015, 0.0015, 0.0015, 0.0424,\n",
      "        0.0424, 0.0424, 0.0424, 0.0015], device='cuda:0', dtype=torch.float64)\n",
      "feat_dists_bp: tensor([18.2731, 16.3308, 15.3421, 16.1293, 17.3107, 15.8543, 15.5849, 16.0872,\n",
      "        17.1668, 15.5329, 17.4775, 13.7219, 16.3866, 15.4689, 17.1367, 16.1066,\n",
      "        15.8276, 17.2620, 14.2742, 15.3443, 15.9957, 15.4327, 16.2292, 15.6903,\n",
      "        16.0715, 16.0175, 17.6961, 15.9743, 17.8844, 15.9710, 16.4330, 16.2207,\n",
      "        18.3810, 18.1741, 19.0851, 15.8568, 15.9311, 17.3604, 15.2764, 17.7107],\n",
      "       device='cuda:0', grad_fn=<SqueezeBackward1>)\n",
      "Q: tensor([0.0062, 0.0208, 0.0385, 0.0236, 0.0113, 0.0280, 0.0331, 0.0242, 0.0124,\n",
      "        0.0342, 0.0102, 0.1054, 0.0201, 0.0356, 0.0126, 0.0239, 0.0285, 0.0117,\n",
      "        0.0748, 0.0384, 0.0256, 0.0364, 0.0222, 0.0310, 0.0245, 0.0253, 0.0089,\n",
      "        0.0260, 0.0079, 0.0260, 0.0195, 0.0223, 0.0058, 0.0066, 0.0038, 0.0280,\n",
      "        0.0267, 0.0110, 0.0401, 0.0088], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "unnorm_probs_global: [0.01 0.1  0.01 ... 0.01 0.01 0.01]\n",
      "Z_global: 235.74600000010003\n",
      "bp_global_prob_sum: 0.01047737819517172\n",
      "k_Bp: 3.128545129078276\n",
      "feat_dists_bu: tensor([16.4020, 16.3742, 18.9296, 17.1797, 16.4774, 16.7754, 17.3933, 17.5789,\n",
      "        17.6073, 16.7882, 16.0743, 15.5274, 15.9045, 18.0716, 15.4278, 14.1915,\n",
      "        15.3967, 14.9977, 17.3479, 18.7902, 16.9052, 14.5443, 16.8814, 16.4295,\n",
      "        15.1829, 17.4214, 18.7773, 16.6827, 17.3622, 17.5833, 18.2887, 17.0419,\n",
      "        15.3728, 17.9242, 15.3646, 18.1372, 16.5211, 16.9873, 17.0721, 19.0133],\n",
      "       device='cuda:0', grad_fn=<SqueezeBackward1>)\n",
      "sum_e_bp: tensor(24.7924, device='cuda:0', grad_fn=<SumBackward0>)\n",
      "sum_e_bu: tensor(23.9281, device='cuda:0', grad_fn=<SumBackward0>)\n",
      "global_ratio: tensor(3.0195, device='cuda:0', grad_fn=<MulBackward0>)\n"
     ]
    }
   ],
   "source": [
    "# Print the intermediate quantities of the BSNE loss computation.\n",
    "_debug_items = (\n",
    "    (\"bp_features:\", bp_features),\n",
    "    (\"bu_features:\", bu_features),\n",
    "    (\"orig_dists_bp:\", orig_dists_bp),\n",
    "    (\"P:\", P),\n",
    "    (\"feat_dists_bp:\", feat_dists_bp),\n",
    "    (\"Q:\", Q),\n",
    "    (\"unnorm_probs_global:\", unnorm_probs_global),\n",
    "    (\"Z_global:\", Z_global),\n",
    "    (\"bp_global_prob_sum:\", bp_global_prob_sum),\n",
    "    (\"k_Bp:\", k_Bp),\n",
    "    (\"feat_dists_bu:\", feat_dists_bu),\n",
    "    (\"sum_e_bp:\", sum_e_bp),\n",
    "    (\"sum_e_bu:\", sum_e_bu),\n",
    "    (\"global_ratio:\", global_ratio),\n",
    ")\n",
    "for _label, _value in _debug_items:\n",
    "    print(_label, _value)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "c5142326",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "bp_features: tensor([[ 0.0735,  0.1432,  0.2947,  ..., -0.0746,  0.5167, -0.1833],\n",
      "        [ 0.8301,  0.0871, -1.5590,  ...,  0.5665, -0.5540,  0.1626],\n",
      "        [ 1.5938, -0.7020, -0.6375,  ...,  0.5392, -2.2870,  0.0392],\n",
      "        ...,\n",
      "        [ 0.2287,  0.2691, -0.6960,  ...,  1.8262, -0.2489, -1.0601],\n",
      "        [ 1.4011,  0.6579,  0.0041,  ...,  0.4725, -0.4251, -0.2861],\n",
      "        [ 0.7985,  0.3066, -0.7456,  ...,  1.0053, -0.0059,  0.4827]],\n",
      "       device='cuda:0', grad_fn=<StackBackward0>)\n",
      "bu_features: tensor([[ 0.6321,  0.6420, -0.1064,  ...,  1.5194, -1.2372, -0.6961],\n",
      "        [-0.0558, -0.0347, -0.2537,  ..., -0.3866,  0.1960, -0.6085],\n",
      "        [ 1.3395,  0.4545, -1.1564,  ...,  0.7400, -0.5331, -2.3669],\n",
      "        ...,\n",
      "        [ 1.1103,  0.4445, -0.2255,  ..., -1.2766, -0.6034, -1.1156],\n",
      "        [ 0.8419,  0.0935,  0.0104,  ...,  0.6042, -0.5481, -0.7126],\n",
      "        [ 1.8648,  1.4712, -0.4018,  ...,  0.8256,  0.7816,  0.0853]],\n",
      "       device='cuda:0', grad_fn=<StackBackward0>)\n",
      "orig_dists_bp: [1 1 1 2 2 1 1 2 2 2 2 2 2 2 2 2 1 2 1 2 2 2 1 2 2 2 2 2 2 2 2 2 2 2 2 1 1\n",
      " 2 2 2]\n",
      "P: tensor([0.0903, 0.0903, 0.0903, 0.0032, 0.0032, 0.0903, 0.0903, 0.0032, 0.0032,\n",
      "        0.0032, 0.0032, 0.0032, 0.0032, 0.0032, 0.0032, 0.0032, 0.0903, 0.0032,\n",
      "        0.0903, 0.0032, 0.0032, 0.0032, 0.0903, 0.0032, 0.0032, 0.0032, 0.0032,\n",
      "        0.0032, 0.0032, 0.0032, 0.0032, 0.0032, 0.0032, 0.0032, 0.0032, 0.0903,\n",
      "        0.0903, 0.0032, 0.0032, 0.0032], device='cuda:0', dtype=torch.float64)\n",
      "feat_dists_bp: tensor([17.1739, 17.9419, 17.6980, 17.8211, 17.2373, 18.0952, 17.2932, 17.2454,\n",
      "        15.9230, 18.3066, 16.3189, 17.9622, 17.3002, 17.0915, 18.1144, 16.4267,\n",
      "        16.8798, 17.9886, 18.8088, 16.7149, 16.3378, 17.5355, 17.1079, 16.9240,\n",
      "        16.4198, 16.7808, 18.6371, 18.1660, 17.5035, 18.7379, 16.9212, 16.8213,\n",
      "        16.4792, 17.4383, 17.9245, 17.4383, 17.7713, 16.0490, 16.9128, 17.5431],\n",
      "       device='cuda:0', grad_fn=<SqueezeBackward1>)\n",
      "Q: tensor([0.0219, 0.0090, 0.0120, 0.0104, 0.0204, 0.0076, 0.0191, 0.0202, 0.0931,\n",
      "        0.0059, 0.0589, 0.0088, 0.0190, 0.0241, 0.0074, 0.0520, 0.0308, 0.0086,\n",
      "        0.0033, 0.0373, 0.0576, 0.0145, 0.0237, 0.0293, 0.0524, 0.0346, 0.0040,\n",
      "        0.0070, 0.0150, 0.0036, 0.0294, 0.0330, 0.0490, 0.0162, 0.0092, 0.0162,\n",
      "        0.0110, 0.0805, 0.0297, 0.0143], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "unnorm_probs_global: [0.13533528 0.13533528 0.13533528 ... 0.13533528 0.13533528 0.13533528]\n",
      "Z_global: 1635.3245811136128\n",
      "bp_global_prob_sum: 0.004732303909687978\n",
      "k_Bp: 1.4130659474328304\n",
      "feat_dists_bu: tensor([18.5493, 18.2278, 17.8553, 18.3226, 17.2628, 17.0861, 17.7634, 18.5652,\n",
      "        16.0455, 16.7946, 16.8388, 17.5043, 17.0578, 17.6884, 17.6687, 17.8079,\n",
      "        18.3602, 17.6491, 16.6072, 18.1221, 18.5421, 17.4638, 17.9239, 18.8807,\n",
      "        17.6198, 18.5791, 16.7308, 16.2829, 16.9668, 17.1444, 17.2959, 16.9767,\n",
      "        17.2846, 17.1383, 17.6546, 18.2743, 16.7264, 18.1228, 18.0507, 16.7941],\n",
      "       device='cuda:0', grad_fn=<SqueezeBackward1>)\n",
      "sum_e_bp: tensor(25.2063, device='cuda:0', grad_fn=<SumBackward0>)\n",
      "sum_e_bu: tensor(24.1805, device='cuda:0', grad_fn=<SumBackward0>)\n",
      "global_ratio: tensor(1.3556, device='cuda:0', grad_fn=<MulBackward0>)\n"
     ]
    }
   ],
   "source": [
    "# Print the intermediate quantities of the BSNE loss computation.\n",
    "# NOTE(review): this cell duplicates the previous debug-print cell.\n",
    "_debug_items = (\n",
    "    (\"bp_features:\", bp_features),\n",
    "    (\"bu_features:\", bu_features),\n",
    "    (\"orig_dists_bp:\", orig_dists_bp),\n",
    "    (\"P:\", P),\n",
    "    (\"feat_dists_bp:\", feat_dists_bp),\n",
    "    (\"Q:\", Q),\n",
    "    (\"unnorm_probs_global:\", unnorm_probs_global),\n",
    "    (\"Z_global:\", Z_global),\n",
    "    (\"bp_global_prob_sum:\", bp_global_prob_sum),\n",
    "    (\"k_Bp:\", k_Bp),\n",
    "    (\"feat_dists_bu:\", feat_dists_bu),\n",
    "    (\"sum_e_bp:\", sum_e_bp),\n",
    "    (\"sum_e_bu:\", sum_e_bu),\n",
    "    (\"global_ratio:\", global_ratio),\n",
    ")\n",
    "for _label, _value in _debug_items:\n",
    "    print(_label, _value)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 79,
   "id": "600b6423",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor(25.3457, device='cuda:0', grad_fn=<SumBackward0>)"
      ]
     },
     "execution_count": 79,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Inspect sum_e_bu (scalar tensor from an earlier cell; see SumBackward0 in its grad_fn)\n",
    "sum_e_bu"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 80,
   "id": "bb3ae0bc",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor(26.3437, device='cuda:0', grad_fn=<SumBackward0>)"
      ]
     },
     "execution_count": 80,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Inspect sum_e_bp (scalar tensor from an earlier cell; see SumBackward0 in its grad_fn)\n",
    "sum_e_bp"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 56,
   "id": "cf4abad0",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor(0.2500, device='cuda:0', grad_fn=<PowBackward0>)"
      ]
     },
     "execution_count": 56,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Inspect loss_global — presumably the global BSNE loss term; defined in an earlier cell\n",
    "loss_global"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 44,
   "id": "688ce07c",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([15.3787, 16.0121, 15.4253, 15.6754, 16.1709, 15.9302, 16.6181, 15.6466,\n",
       "        16.4141, 16.9830, 14.4971, 14.9762, 15.7784, 16.2958, 16.1495, 14.7736,\n",
       "        15.1410, 15.6543, 16.2820, 16.7194, 16.9441, 16.7581, 16.3961, 15.5213,\n",
       "        15.6640, 15.7070, 16.2821, 17.3215, 15.1667, 16.1609, 16.2895, 15.4043,\n",
       "        15.4740, 14.9284, 15.5751, 14.2662, 15.6083, 17.0193, 15.5923, 15.1397],\n",
       "       device='cuda:0', grad_fn=<SqueezeBackward1>)"
      ]
     },
     "execution_count": 44,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Inspect feat_dists_bp (1-D distance tensor computed in an earlier cell)\n",
    "feat_dists_bp"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 72,
   "id": "69f718fb",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor(1.0011e-06, device='cuda:0', grad_fn=<SumBackward0>)"
      ]
     },
     "execution_count": 72,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Re-inspect sum_e_bu after a later run (value differs from the earlier display)\n",
    "sum_e_bu"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 71,
   "id": "63fefd71",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([17.9786, 17.8149, 17.2145, 18.3167, 16.4932, 17.2646, 18.8500, 17.8485,\n",
       "        16.4257, 17.3208, 17.3076, 17.1714, 17.3531, 17.5322, 17.8556, 18.1759,\n",
       "        17.2787, 18.6417, 18.6537, 18.1426, 17.9105, 19.2785, 16.3744, 16.7953,\n",
       "        18.3104, 18.5197, 18.4340, 18.1621, 17.8350, 17.6045, 17.5249, 17.0740,\n",
       "        17.7094, 17.5130, 18.7455, 18.2615, 16.3142, 17.2683, 17.7009, 18.7330],\n",
       "       device='cuda:0', grad_fn=<SqueezeBackward1>)"
      ]
     },
     "execution_count": 71,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Re-inspect feat_dists_bp after a later run\n",
    "feat_dists_bp"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 49,
   "id": "ed949b47",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([0.1971, 0.2812, 0.2541, 0.1176, 0.1694, 0.1517, 0.1961, 0.1920, 0.2319,\n",
       "        0.2380, 0.0799, 0.1419, 0.1515, 0.1990, 0.2612, 0.1326, 0.2011, 0.1924,\n",
       "        0.2845, 0.2461, 0.2412, 0.1704, 0.2357, 0.1555, 0.0924, 0.1515, 0.2337,\n",
       "        0.2247, 0.1362, 0.1792, 0.3258, 0.3037, 0.2473, 0.2268, 0.1531, 0.1798,\n",
       "        0.2265, 0.3413, 0.1420, 0.1489], device='cuda:0',\n",
       "       grad_fn=<SqueezeBackward4>)"
      ]
     },
     "execution_count": 49,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Inspect feat_dists_bu (1-D distance tensor computed in an earlier cell)\n",
    "feat_dists_bu"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 50,
   "id": "ea8f2ada",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([17.3830, 18.8074, 17.2807, 16.2711, 16.5384, 17.5892, 17.5048, 18.6080,\n",
       "        17.4325, 17.3218, 15.9989, 17.1385, 16.9832, 17.4741, 17.7036, 16.7957,\n",
       "        16.9648, 16.8700, 18.7639, 16.2010, 15.8607, 17.6674, 17.1501, 17.4688,\n",
       "        17.7472, 18.1129, 17.2142, 16.6794, 17.3207, 17.3963, 17.6601, 16.1668,\n",
       "        17.2354, 17.4118, 17.9075, 17.9652, 16.5519, 17.9724, 17.1188, 17.2855],\n",
       "       device='cuda:0', grad_fn=<SqueezeBackward1>)"
      ]
     },
     "execution_count": 50,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Re-inspect feat_dists_bp after a later run\n",
    "feat_dists_bp"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "571cab27",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "74f15734",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ba38d45e",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 0630 test (with a fixed subgraph per epoch, the loss already decreases steadily)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "a7d542b2",
   "metadata": {},
   "outputs": [],
   "source": [
    "class BSNE_Transformer(nn.Module):\n",
    "    def __init__(self, in_feat, out_feat, relation_nums=3, d_model=256,\n",
    "                 nhead=8, num_layers=3, dim_feedforward=256,\n",
    "                 drop_rate=0.5):\n",
    "        super().__init__()\n",
    "        self.relation_nums = relation_nums\n",
    "        self.d_model = d_model\n",
    "        self.nhead = nhead\n",
    "\n",
    "        # 特征投影层添加残差\n",
    "        self.feature_proj = nn.Sequential(\n",
    "            nn.Linear(in_feat, d_model),\n",
    "            nn.LayerNorm(d_model),\n",
    "            nn.ReLU()\n",
    "        )\n",
    "        \n",
    "        # 添加残差投影层 (用于输入与输出的维度匹配)\n",
    "        self.residual_proj = nn.Linear(in_feat, d_model) if in_feat != d_model else nn.Identity()\n",
    "        \n",
    "        self.norm = nn.LayerNorm(d_model)\n",
    "\n",
    "        # 修改Transformer编码器层以支持残差连接\n",
    "        self.relation_encoders = nn.ModuleList([\n",
    "            TransformerEncoder(\n",
    "                TransformerEncoderLayer(\n",
    "                    d_model=d_model,\n",
    "                    nhead=nhead,\n",
    "                    dim_feedforward=dim_feedforward,\n",
    "                    dropout=drop_rate,\n",
    "                    batch_first=True\n",
    "                ),\n",
    "                num_layers=num_layers,\n",
    "                norm=nn.LayerNorm(d_model)  # 添加层归一化\n",
    "            ) for _ in range(relation_nums)\n",
    "        ])\n",
    "\n",
    "        # 添加关系间的残差连接\n",
    "        self.relation_residual = nn.ModuleList([\n",
    "            nn.Linear(d_model, d_model) for _ in range(relation_nums)\n",
    "        ])\n",
    "        \n",
    "        # 分类器添加残差\n",
    "        self.classifier = nn.Sequential(\n",
    "            nn.Linear(relation_nums * d_model, 512),\n",
    "            nn.ReLU(),\n",
    "            nn.Dropout(drop_rate),\n",
    "            nn.Linear(512, out_feat)\n",
    "        )\n",
    "        \n",
    "        # 添加分类器的残差连接\n",
    "        self.classifier_residual = nn.Linear(d_model, out_feat) if d_model != out_feat else nn.Identity()\n",
    "        \n",
    "        # 初始化权重\n",
    "        for p in self.parameters():\n",
    "            if p.dim() > 1:\n",
    "                nn.init.xavier_uniform_(p)\n",
    "\n",
    "    def forward(self, subgraph_batch):\n",
    "        center_logits_list = []\n",
    "        center_features_list = []\n",
    "\n",
    "        for sg in subgraph_batch:\n",
    "            # 原始特征保留用于残差连接\n",
    "            raw_features = sg['features']\n",
    "            \n",
    "            # 特征投影 + 残差连接\n",
    "            projected = self.feature_proj(raw_features.unsqueeze(0))\n",
    "            residual = self.residual_proj(raw_features.unsqueeze(0))\n",
    "            features = projected + residual\n",
    "            features = self.norm(features)\n",
    "\n",
    "            # 为每个关系类型单独处理\n",
    "            rel_outputs = []\n",
    "            num_nodes = features.size(1)\n",
    "\n",
    "            for rel_idx in range(self.relation_nums):\n",
    "                # 构建当前关系的邻接矩阵\n",
    "                edge_index = sg['edges'][rel_idx]\n",
    "                adj = torch.zeros(num_nodes, num_nodes,\n",
    "                                  dtype=torch.float, device=features.device)\n",
    "\n",
    "                if edge_index.size(1) > 0:\n",
    "                    src, dst = edge_index\n",
    "                    adj[src, dst] = 1.0\n",
    "\n",
    "                # 添加自环\n",
    "                adj[range(num_nodes), range(num_nodes)] = 1.0\n",
    "\n",
    "                # 创建注意力掩码\n",
    "                adj_mask = adj.masked_fill(adj == 0.0, float('-inf'))\n",
    "                adj_mask = adj_mask.masked_fill(adj == 1.0, 0.0)\n",
    "\n",
    "                # 扩展为多头注意力掩码\n",
    "                adj_mask = adj_mask.unsqueeze(0).unsqueeze(0)\n",
    "                adj_mask = adj_mask.expand(1, self.nhead, num_nodes, num_nodes)\n",
    "                adj_mask = adj_mask.reshape(-1, num_nodes, num_nodes)\n",
    "\n",
    "                # 关系编码 + 残差连接\n",
    "                encoder_input = features\n",
    "                encoder_output = self.relation_encoders[rel_idx](encoder_input)\n",
    "                \n",
    "                # 添加关系编码的残差连接\n",
    "                encoder_output = encoder_output + self.relation_residual[rel_idx](encoder_input)\n",
    "                rel_outputs.append(encoder_output)\n",
    "\n",
    "            # 合并多关系特征\n",
    "            combined = torch.cat(rel_outputs, dim=-1)  # [1, num_nodes, rel*d_model]\n",
    "\n",
    "            # 提取中心节点特征\n",
    "            center_features = combined[:, 0, :]  # [1, rel*d_model]\n",
    "            \n",
    "            # 分类器 + 残差连接\n",
    "            center_logits = self.classifier(center_features)\n",
    "            \n",
    "            # 添加分类器的残差连接\n",
    "            residual_logits = self.classifier_residual(center_features[:, :self.d_model])\n",
    "            center_logits = center_logits + residual_logits\n",
    "            \n",
    "            center_logits = F.log_softmax(center_logits, dim=-1)\n",
    "\n",
    "            center_logits_list.append(center_logits)\n",
    "            center_features_list.append(center_features)\n",
    "\n",
    "        # 将结果堆叠为批次\n",
    "        center_logits = torch.cat(center_logits_list, dim=0)\n",
    "        center_features = torch.cat(center_features_list, dim=0)\n",
    "\n",
    "        return center_logits, center_features"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4e9790ff",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "cuda\n",
      "loading data...\n",
      "\n",
      "=== Starting Pretraining ===\n",
      "\n",
      "=== Pretraining Epoch 0 (Center Node: 268) ===\n",
      "Step 0: BSNE_Loss=0.3096,local_loss: 0.027310098603291266,global_loss: 0.28227484226226807\n",
      "Step 1: BSNE_Loss=0.1839,local_loss: 0.0187000550438459,global_loss: 0.16519863903522491\n",
      "Step 2: BSNE_Loss=0.1722,local_loss: 0.01776733895370456,global_loss: 0.1544652134180069\n",
      "Step 3: BSNE_Loss=0.4195,local_loss: 0.031697590821644965,global_loss: 0.3878135681152344\n",
      "Step 4: BSNE_Loss=0.3418,local_loss: 0.02985427907852212,global_loss: 0.3119572103023529\n",
      "Step 5: BSNE_Loss=0.2335,local_loss: 0.025948343198833827,global_loss: 0.2075144499540329\n",
      "Step 6: BSNE_Loss=0.3034,local_loss: 0.030337164782724385,global_loss: 0.27308690547943115\n",
      "Step 7: BSNE_Loss=0.2337,local_loss: 0.020975980466086827,global_loss: 0.2127065360546112\n",
      "Step 8: BSNE_Loss=0.2319,local_loss: 0.025986152767131628,global_loss: 0.20590172708034515\n",
      "Step 9: BSNE_Loss=0.1335,local_loss: 0.015335009275205239,global_loss: 0.11814568191766739\n",
      "Step 13: BSNE_Loss=0.2777,local_loss: 0.026076412950298063,global_loss: 0.2516663074493408\n",
      "Step 14: BSNE_Loss=0.2310,local_loss: 0.0214460822948695,global_loss: 0.2095656841993332\n",
      "Step 15: BSNE_Loss=0.2301,local_loss: 0.024278238987583568,global_loss: 0.20579469203948975\n",
      "Step 16: BSNE_Loss=0.2620,local_loss: 0.025296207689381258,global_loss: 0.23671208322048187\n",
      "Step 17: BSNE_Loss=0.1892,local_loss: 0.01892828262768351,global_loss: 0.17023946344852448\n",
      "Step 18: BSNE_Loss=0.3747,local_loss: 0.0322712713330329,global_loss: 0.34238147735595703\n",
      "Step 19: BSNE_Loss=0.2332,local_loss: 0.025951239140238926,global_loss: 0.20729471743106842\n",
      "Step 20: BSNE_Loss=0.1757,local_loss: 0.02114812514840394,global_loss: 0.15458419919013977\n",
      "Step 21: BSNE_Loss=0.1760,local_loss: 0.01942180430309306,global_loss: 0.15661044418811798\n",
      "Step 22: BSNE_Loss=0.1672,local_loss: 0.020693232024569023,global_loss: 0.14646148681640625\n",
      "Step 23: BSNE_Loss=0.2477,local_loss: 0.02642842203322046,global_loss: 0.22128187119960785\n",
      "Step 24: BSNE_Loss=0.1584,local_loss: 0.02258591158965602,global_loss: 0.13579663634300232\n",
      "Step 25: BSNE_Loss=0.2867,local_loss: 0.028566936384011834,global_loss: 0.2580862045288086\n",
      "Step 26: BSNE_Loss=0.1104,local_loss: 0.01231713613826336,global_loss: 0.09807679802179337\n",
      "Step 27: BSNE_Loss=0.2458,local_loss: 0.024978037936625598,global_loss: 0.22079312801361084\n",
      "Step 28: BSNE_Loss=0.1919,local_loss: 0.021866444943069507,global_loss: 0.17003130912780762\n",
      "Step 29: BSNE_Loss=0.2867,local_loss: 0.029563276577885003,global_loss: 0.25713858008384705\n",
      "Step 30: BSNE_Loss=0.1245,local_loss: 0.020367597931560632,global_loss: 0.10418162494897842\n",
      "Step 31: BSNE_Loss=0.2748,local_loss: 0.026507505376709428,global_loss: 0.24831251800060272\n",
      "Step 32: BSNE_Loss=0.0961,local_loss: 0.0138056542902696,global_loss: 0.08226801455020905\n",
      "Step 33: BSNE_Loss=0.1905,local_loss: 0.025019204262456546,global_loss: 0.16544952988624573\n",
      "Step 34: BSNE_Loss=0.3411,local_loss: 0.034875212276717804,global_loss: 0.30626022815704346\n",
      "Step 35: BSNE_Loss=0.2576,local_loss: 0.03194169376429611,global_loss: 0.2256663590669632\n",
      "Step 36: BSNE_Loss=0.2322,local_loss: 0.02287876305022362,global_loss: 0.2093304544687271\n",
      "Step 37: BSNE_Loss=0.2193,local_loss: 0.030472346780551274,global_loss: 0.1887953132390976\n",
      "Step 38: BSNE_Loss=0.2967,local_loss: 0.030589771864141947,global_loss: 0.26613888144493103\n",
      "Step 39: BSNE_Loss=0.2852,local_loss: 0.03221625618409454,global_loss: 0.25299176573753357\n",
      "Step 40: BSNE_Loss=0.2818,local_loss: 0.032479733164866034,global_loss: 0.2493339329957962\n",
      "Step 41: BSNE_Loss=0.2176,local_loss: 0.025391870831625897,global_loss: 0.19220757484436035\n",
      "Step 42: BSNE_Loss=0.2762,local_loss: 0.03686747027532748,global_loss: 0.2392870932817459\n",
      "Step 43: BSNE_Loss=0.1722,local_loss: 0.030891139380350492,global_loss: 0.14132782816886902\n",
      "Step 44: BSNE_Loss=0.4031,local_loss: 0.04125690528076749,global_loss: 0.36181288957595825\n",
      "Step 45: BSNE_Loss=0.2620,local_loss: 0.029755466861003454,global_loss: 0.2322024554014206\n",
      "Step 46: BSNE_Loss=0.2533,local_loss: 0.032269194238653376,global_loss: 0.2210734635591507\n",
      "Step 47: BSNE_Loss=0.1882,local_loss: 0.024957831536263998,global_loss: 0.16327489912509918\n",
      "Step 48: BSNE_Loss=0.2720,local_loss: 0.03245914645529979,global_loss: 0.2394999861717224\n",
      "Step 49: BSNE_Loss=0.1311,local_loss: 0.019996884809481273,global_loss: 0.11105986684560776\n",
      "Step 50: BSNE_Loss=0.2039,local_loss: 0.022122427456774476,global_loss: 0.1817403882741928\n",
      "Step 51: BSNE_Loss=0.1451,local_loss: 0.02318661597127197,global_loss: 0.12195955216884613\n",
      "Step 52: BSNE_Loss=0.2685,local_loss: 0.03772608247974055,global_loss: 0.23081402480602264\n",
      "Step 53: BSNE_Loss=0.1872,local_loss: 0.02938235171936698,global_loss: 0.15778112411499023\n",
      "Step 54: BSNE_Loss=0.2861,local_loss: 0.03532347339467934,global_loss: 0.2507610023021698\n",
      "Step 55: BSNE_Loss=0.1941,local_loss: 0.02802795899204265,global_loss: 0.16603806614875793\n",
      "Step 56: BSNE_Loss=0.3026,local_loss: 0.031622977284027944,global_loss: 0.2710255980491638\n",
      "Step 57: BSNE_Loss=0.2212,local_loss: 0.029014222272426678,global_loss: 0.19217777252197266\n",
      "Step 58: BSNE_Loss=0.2715,local_loss: 0.03362025958407079,global_loss: 0.23792247474193573\n",
      "Step 59: BSNE_Loss=0.3080,local_loss: 0.0358680559112102,global_loss: 0.27211490273475647\n",
      "Step 60: BSNE_Loss=0.0973,local_loss: 0.016593229465259846,global_loss: 0.08069319278001785\n",
      "Step 61: BSNE_Loss=0.1878,local_loss: 0.025138230260896025,global_loss: 0.1626884788274765\n",
      "Step 62: BSNE_Loss=0.1741,local_loss: 0.03411629393795986,global_loss: 0.13996092975139618\n",
      "Step 63: BSNE_Loss=0.3263,local_loss: 0.04019708303594056,global_loss: 0.28612083196640015\n",
      "Step 64: BSNE_Loss=0.3035,local_loss: 0.03511881206526592,global_loss: 0.2683631479740143\n",
      "Step 65: BSNE_Loss=0.2113,local_loss: 0.029450629660742886,global_loss: 0.18183360993862152\n",
      "Step 66: BSNE_Loss=0.2752,local_loss: 0.034848865129435455,global_loss: 0.2403794527053833\n",
      "Step 67: BSNE_Loss=0.1792,local_loss: 0.02552470849651537,global_loss: 0.15369278192520142\n",
      "Step 68: BSNE_Loss=0.1755,local_loss: 0.024657992205754372,global_loss: 0.15084828436374664\n",
      "Step 69: BSNE_Loss=0.3805,local_loss: 0.0427379186792016,global_loss: 0.337727814912796\n",
      "Step 70: BSNE_Loss=0.2546,local_loss: 0.038424102890182485,global_loss: 0.21614663302898407\n",
      "Step 71: BSNE_Loss=0.2478,local_loss: 0.03499720701731359,global_loss: 0.2128150314092636\n",
      "Step 72: BSNE_Loss=0.2268,local_loss: 0.04188298194605289,global_loss: 0.1849358230829239\n",
      "Step 73: BSNE_Loss=0.2360,local_loss: 0.03723400477062988,global_loss: 0.19875521957874298\n",
      "Step 74: BSNE_Loss=0.1549,local_loss: 0.032512188600758836,global_loss: 0.12236003577709198\n",
      "Step 75: BSNE_Loss=0.2822,local_loss: 0.03417221825573749,global_loss: 0.24801218509674072\n",
      "Step 76: BSNE_Loss=0.2395,local_loss: 0.026373708079374086,global_loss: 0.2131320983171463\n",
      "Step 77: BSNE_Loss=0.1780,local_loss: 0.036547754115965926,global_loss: 0.14141693711280823\n",
      "Step 78: BSNE_Loss=0.2517,local_loss: 0.034576911806393176,global_loss: 0.21715191006660461\n",
      "Step 79: BSNE_Loss=0.2011,local_loss: 0.041162624250070404,global_loss: 0.15996122360229492\n",
      "Step 80: BSNE_Loss=0.1559,local_loss: 0.036413840882539045,global_loss: 0.11944083869457245\n",
      "Step 81: BSNE_Loss=0.1258,local_loss: 0.024343576787639296,global_loss: 0.10147971659898758\n",
      "Step 82: BSNE_Loss=0.3186,local_loss: 0.03872094460143505,global_loss: 0.2798493206501007\n",
      "Step 83: BSNE_Loss=0.2446,local_loss: 0.04147372001342722,global_loss: 0.20308944582939148\n",
      "Step 84: BSNE_Loss=0.2793,local_loss: 0.03616777953868359,global_loss: 0.24311098456382751\n",
      "Step 85: BSNE_Loss=0.2256,local_loss: 0.034074475212523214,global_loss: 0.19152341783046722\n",
      "Step 86: BSNE_Loss=0.1656,local_loss: 0.030772570575597936,global_loss: 0.13482673466205597\n",
      "Step 87: BSNE_Loss=0.3148,local_loss: 0.04213245797055123,global_loss: 0.27270662784576416\n",
      "Step 88: BSNE_Loss=0.1669,local_loss: 0.05031283051715103,global_loss: 0.1165551170706749\n",
      "Step 89: BSNE_Loss=0.0830,local_loss: 0.02388392812997667,global_loss: 0.05914589390158653\n",
      "Step 90: BSNE_Loss=0.2064,local_loss: 0.039917304879562264,global_loss: 0.16648134589195251\n",
      "Step 91: BSNE_Loss=0.3363,local_loss: 0.04450234498524733,global_loss: 0.2918340265750885\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 92: BSNE_Loss=0.2858,local_loss: 0.04706282643451363,global_loss: 0.23873214423656464\n",
      "Step 93: BSNE_Loss=0.1762,local_loss: 0.04221407524994049,global_loss: 0.13396912813186646\n",
      "Step 94: BSNE_Loss=0.0134,local_loss: 0.03183786130035594,global_loss: -0.01848730258643627\n",
      "Center node 268 finished at step 95 with loss 0.0134\n",
      "\n",
      "=== Pretraining Epoch 1 (Center Node: 7572) ===\n",
      "Step 0: BSNE_Loss=0.6020,local_loss: 0.020961445570176963,global_loss: 0.5810606479644775\n",
      "Step 1: BSNE_Loss=0.5441,local_loss: 0.026721343948309287,global_loss: 0.5173478126525879\n",
      "Step 2: BSNE_Loss=0.5388,local_loss: 0.025157643404673587,global_loss: 0.513611912727356\n",
      "Step 3: BSNE_Loss=0.4550,local_loss: 0.02399998006805941,global_loss: 0.43099987506866455\n",
      "Step 4: BSNE_Loss=0.5389,local_loss: 0.02519230626457402,global_loss: 0.5136944651603699\n",
      "Step 5: BSNE_Loss=0.4806,local_loss: 0.030326163696330677,global_loss: 0.4502835273742676\n",
      "Step 6: BSNE_Loss=0.5455,local_loss: 0.023896595745241358,global_loss: 0.5216167569160461\n",
      "Step 7: BSNE_Loss=0.4609,local_loss: 0.028300203424098322,global_loss: 0.4325748085975647\n",
      "Step 8: BSNE_Loss=0.4915,local_loss: 0.028564641642058776,global_loss: 0.46295446157455444\n",
      "Step 9: BSNE_Loss=0.6128,local_loss: 0.024681651196981118,global_loss: 0.5881331562995911\n",
      "Step 10: BSNE_Loss=0.4724,local_loss: 0.026120627253878877,global_loss: 0.44631364941596985\n",
      "Step 11: BSNE_Loss=0.5982,local_loss: 0.022498055233973288,global_loss: 0.5756794214248657\n",
      "Step 12: BSNE_Loss=0.5484,local_loss: 0.027338354152085974,global_loss: 0.5210134983062744\n",
      "Step 13: BSNE_Loss=0.5315,local_loss: 0.02930977094444168,global_loss: 0.5022307634353638\n",
      "Step 14: BSNE_Loss=0.4123,local_loss: 0.030281356208477155,global_loss: 0.38202548027038574\n",
      "Step 15: BSNE_Loss=0.5014,local_loss: 0.03126099195161394,global_loss: 0.47013556957244873\n",
      "Step 16: BSNE_Loss=0.5385,local_loss: 0.029034470792458876,global_loss: 0.5094284415245056\n",
      "Step 17: BSNE_Loss=0.5227,local_loss: 0.0295790946846199,global_loss: 0.49307096004486084\n",
      "Step 18: BSNE_Loss=0.4422,local_loss: 0.0360190999722025,global_loss: 0.4061553180217743\n",
      "Step 19: BSNE_Loss=0.5864,local_loss: 0.02511701481895219,global_loss: 0.5612509250640869\n",
      "Step 20: BSNE_Loss=0.5408,local_loss: 0.030485609459092396,global_loss: 0.5102918148040771\n",
      "Step 21: BSNE_Loss=0.5869,local_loss: 0.028847263562379155,global_loss: 0.5580329298973083\n",
      "Step 22: BSNE_Loss=0.5058,local_loss: 0.034204445502340385,global_loss: 0.4716188311576843\n",
      "Step 23: BSNE_Loss=0.3697,local_loss: 0.040181677984066695,global_loss: 0.32950860261917114\n",
      "Step 24: BSNE_Loss=0.5747,local_loss: 0.025432706709486615,global_loss: 0.5492587089538574\n",
      "Step 25: BSNE_Loss=0.4652,local_loss: 0.03418907023832725,global_loss: 0.4310094118118286\n",
      "Step 26: BSNE_Loss=0.4552,local_loss: 0.03330245143212661,global_loss: 0.4218887686729431\n",
      "Step 27: BSNE_Loss=0.4958,local_loss: 0.031111149447433116,global_loss: 0.4647350311279297\n",
      "Step 28: BSNE_Loss=0.3987,local_loss: 0.03861133788103832,global_loss: 0.36012986302375793\n",
      "Step 29: BSNE_Loss=0.4526,local_loss: 0.03086340267966016,global_loss: 0.42174726724624634\n",
      "Step 30: BSNE_Loss=0.4783,local_loss: 0.0333014923318543,global_loss: 0.44498956203460693\n",
      "Step 31: BSNE_Loss=0.5090,local_loss: 0.03102877350119584,global_loss: 0.47795629501342773\n",
      "Step 32: BSNE_Loss=0.4285,local_loss: 0.03838512701948802,global_loss: 0.39015817642211914\n",
      "Step 33: BSNE_Loss=0.4149,local_loss: 0.03874401092497989,global_loss: 0.37620460987091064\n",
      "Step 34: BSNE_Loss=0.5167,local_loss: 0.03831561557917262,global_loss: 0.47836413979530334\n",
      "Step 35: BSNE_Loss=0.4539,local_loss: 0.03590103964235991,global_loss: 0.4180067479610443\n",
      "Step 36: BSNE_Loss=0.4753,local_loss: 0.03717530506657751,global_loss: 0.4381145238876343\n",
      "Step 37: BSNE_Loss=0.6738,local_loss: 0.027537083682126092,global_loss: 0.6462811231613159\n",
      "Step 38: BSNE_Loss=0.4914,local_loss: 0.031091442469412173,global_loss: 0.4603249430656433\n",
      "Step 39: BSNE_Loss=0.4202,local_loss: 0.04035586834470867,global_loss: 0.3798860013484955\n",
      "Step 40: BSNE_Loss=0.5063,local_loss: 0.03944826869852155,global_loss: 0.4668712019920349\n",
      "Step 41: BSNE_Loss=0.4729,local_loss: 0.041699943456276985,global_loss: 0.43117547035217285\n",
      "Step 42: BSNE_Loss=0.4282,local_loss: 0.03757329532668401,global_loss: 0.3906640410423279\n",
      "Step 43: BSNE_Loss=0.5556,local_loss: 0.029536610969830887,global_loss: 0.5260903835296631\n",
      "Step 44: BSNE_Loss=0.3825,local_loss: 0.03943015506867484,global_loss: 0.34305495023727417\n",
      "Step 45: BSNE_Loss=0.4287,local_loss: 0.044910783002675675,global_loss: 0.3838057219982147\n",
      "Step 46: BSNE_Loss=0.3931,local_loss: 0.040144441541758676,global_loss: 0.3529829978942871\n",
      "Step 47: BSNE_Loss=0.5565,local_loss: 0.0344410066881676,global_loss: 0.522103488445282\n",
      "Step 48: BSNE_Loss=0.4580,local_loss: 0.035537493017103294,global_loss: 0.4224163293838501\n",
      "Step 49: BSNE_Loss=0.3684,local_loss: 0.036481972145486236,global_loss: 0.33194950222969055\n",
      "Step 50: BSNE_Loss=0.4434,local_loss: 0.045101247370275106,global_loss: 0.398276686668396\n",
      "Step 51: BSNE_Loss=0.4325,local_loss: 0.046750315884422425,global_loss: 0.3857171833515167\n",
      "Step 52: BSNE_Loss=0.5411,local_loss: 0.03806187276667708,global_loss: 0.5030611157417297\n",
      "Step 53: BSNE_Loss=0.5649,local_loss: 0.03825669767284033,global_loss: 0.5266790390014648\n",
      "Step 54: BSNE_Loss=0.5754,local_loss: 0.04455624400960613,global_loss: 0.5308210849761963\n",
      "Step 55: BSNE_Loss=0.4360,local_loss: 0.04625612402855751,global_loss: 0.38971754908561707\n",
      "Step 56: BSNE_Loss=0.5088,local_loss: 0.04551735451448122,global_loss: 0.46329355239868164\n",
      "Step 57: BSNE_Loss=0.4330,local_loss: 0.049479905280493675,global_loss: 0.3835643231868744\n",
      "Step 58: BSNE_Loss=0.3596,local_loss: 0.041377711215159985,global_loss: 0.31817737221717834\n",
      "Step 59: BSNE_Loss=0.4299,local_loss: 0.0511727650994068,global_loss: 0.3786809742450714\n",
      "Step 60: BSNE_Loss=0.5006,local_loss: 0.04439979642427371,global_loss: 0.4562014937400818\n",
      "Step 61: BSNE_Loss=0.3793,local_loss: 0.04592583440368912,global_loss: 0.33339154720306396\n",
      "Step 62: BSNE_Loss=0.4609,local_loss: 0.04873857183724033,global_loss: 0.4121277332305908\n",
      "Step 63: BSNE_Loss=0.5564,local_loss: 0.04885348133475227,global_loss: 0.5075682401657104\n",
      "Step 64: BSNE_Loss=0.4659,local_loss: 0.044310694244790734,global_loss: 0.42158743739128113\n",
      "Step 65: BSNE_Loss=0.4731,local_loss: 0.04955724476765549,global_loss: 0.4235665798187256\n",
      "Step 66: BSNE_Loss=0.5440,local_loss: 0.041978917060195665,global_loss: 0.5020248889923096\n",
      "Step 67: BSNE_Loss=0.4625,local_loss: 0.049388755552170274,global_loss: 0.41312989592552185\n",
      "Step 68: BSNE_Loss=0.3794,local_loss: 0.0502191631162561,global_loss: 0.3291556239128113\n",
      "Step 69: BSNE_Loss=0.3821,local_loss: 0.052618434997993006,global_loss: 0.3295224905014038\n",
      "Step 70: BSNE_Loss=0.3776,local_loss: 0.03772011741237857,global_loss: 0.33988767862319946\n",
      "Step 71: BSNE_Loss=0.2962,local_loss: 0.06088358843923998,global_loss: 0.2352711260318756\n",
      "Step 72: BSNE_Loss=0.4218,local_loss: 0.056422540797282816,global_loss: 0.36536940932273865\n",
      "Step 73: BSNE_Loss=0.4138,local_loss: 0.06170387050562104,global_loss: 0.3521323502063751\n",
      "Step 74: BSNE_Loss=0.3757,local_loss: 0.0593489812164354,global_loss: 0.3163312077522278\n",
      "Step 75: BSNE_Loss=0.3952,local_loss: 0.04834841288991441,global_loss: 0.346823126077652\n",
      "Step 76: BSNE_Loss=0.3950,local_loss: 0.05911630919161862,global_loss: 0.3359118700027466\n",
      "Step 77: BSNE_Loss=0.4115,local_loss: 0.05458995336238351,global_loss: 0.3568785488605499\n",
      "Step 78: BSNE_Loss=0.2934,local_loss: 0.07316323238260364,global_loss: 0.22019034624099731\n",
      "Step 79: BSNE_Loss=0.2046,local_loss: 0.06527578043596692,global_loss: 0.13930436968803406\n",
      "Step 80: BSNE_Loss=0.4080,local_loss: 0.0440056064302507,global_loss: 0.36396318674087524\n",
      "Step 81: BSNE_Loss=0.5397,local_loss: 0.05292435430839993,global_loss: 0.4867274761199951\n",
      "Step 82: BSNE_Loss=0.4352,local_loss: 0.049993658723796686,global_loss: 0.3852333128452301\n",
      "Step 83: BSNE_Loss=0.3819,local_loss: 0.049250916589392506,global_loss: 0.3326517641544342\n",
      "Step 84: BSNE_Loss=0.4818,local_loss: 0.047756914677701046,global_loss: 0.43400007486343384\n",
      "Step 85: BSNE_Loss=0.3827,local_loss: 0.05422505625182243,global_loss: 0.3285187780857086\n",
      "Step 86: BSNE_Loss=0.5473,local_loss: 0.0520634702051957,global_loss: 0.49528592824935913\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 87: BSNE_Loss=0.2556,local_loss: 0.06310219890433572,global_loss: 0.19254274666309357\n",
      "Step 88: BSNE_Loss=0.3264,local_loss: 0.06256269424470093,global_loss: 0.2638436257839203\n",
      "Step 89: BSNE_Loss=0.4109,local_loss: 0.07101389507146394,global_loss: 0.33986204862594604\n",
      "Step 90: BSNE_Loss=0.2919,local_loss: 0.0651001118658665,global_loss: 0.22679707407951355\n",
      "Step 91: BSNE_Loss=0.3730,local_loss: 0.05757167014997056,global_loss: 0.31539905071258545\n",
      "Step 92: BSNE_Loss=0.3594,local_loss: 0.05198246107855795,global_loss: 0.30737531185150146\n",
      "Step 93: BSNE_Loss=0.3642,local_loss: 0.0742660341823429,global_loss: 0.28990888595581055\n",
      "Step 94: BSNE_Loss=0.5147,local_loss: 0.05571801451012231,global_loss: 0.45897138118743896\n",
      "Step 95: BSNE_Loss=0.5505,local_loss: 0.06020186231882191,global_loss: 0.4902680516242981\n",
      "Step 96: BSNE_Loss=0.1781,local_loss: 0.06357172486067274,global_loss: 0.11457578092813492\n",
      "Step 97: BSNE_Loss=0.6444,local_loss: 0.04430621145607126,global_loss: 0.6000678539276123\n",
      "Step 98: BSNE_Loss=0.2765,local_loss: 0.06824801928116285,global_loss: 0.20824632048606873\n",
      "Step 99: BSNE_Loss=0.4179,local_loss: 0.061478917541934554,global_loss: 0.35639697313308716\n",
      "Step 100: BSNE_Loss=0.2570,local_loss: 0.06611570019284567,global_loss: 0.19090892374515533\n",
      "Step 101: BSNE_Loss=0.2386,local_loss: 0.08252826403637623,global_loss: 0.15611454844474792\n",
      "Step 102: BSNE_Loss=0.2002,local_loss: 0.07680492768996602,global_loss: 0.12341796606779099\n",
      "Step 103: BSNE_Loss=0.2941,local_loss: 0.07105128656098178,global_loss: 0.2230496108531952\n",
      "Step 104: BSNE_Loss=0.4201,local_loss: 0.06516621924324724,global_loss: 0.35492992401123047\n",
      "Step 105: BSNE_Loss=0.4078,local_loss: 0.06825504092611616,global_loss: 0.339523047208786\n",
      "Step 106: BSNE_Loss=0.1974,local_loss: 0.0780948306870654,global_loss: 0.11930711567401886\n",
      "Step 107: BSNE_Loss=0.1359,local_loss: 0.08453920476653685,global_loss: 0.05133466422557831\n",
      "Step 108: BSNE_Loss=0.5115,local_loss: 0.058314731420972696,global_loss: 0.45321813225746155\n",
      "Step 109: BSNE_Loss=0.2859,local_loss: 0.06881281232263764,global_loss: 0.21713244915008545\n",
      "Step 110: BSNE_Loss=0.3213,local_loss: 0.08847658823663992,global_loss: 0.2328418791294098\n",
      "Step 111: BSNE_Loss=0.1442,local_loss: 0.07231866219294741,global_loss: 0.0719011127948761\n",
      "Step 112: BSNE_Loss=0.3987,local_loss: 0.07360855037202704,global_loss: 0.3250740170478821\n",
      "Step 113: BSNE_Loss=0.1923,local_loss: 0.05605835797611251,global_loss: 0.13621075451374054\n",
      "Step 114: BSNE_Loss=0.5302,local_loss: 0.06213721099666882,global_loss: 0.468055784702301\n",
      "Step 115: BSNE_Loss=0.0295,local_loss: 0.09017413433998991,global_loss: -0.06066172942519188\n",
      "Center node 7572 finished at step 116 with loss 0.0295\n",
      "\n",
      "=== Pretraining Epoch 2 (Center Node: 7964) ===\n",
      "Step 0: BSNE_Loss=0.3646,local_loss: 0.09501517476678462,global_loss: 0.26958754658699036\n",
      "Step 1: BSNE_Loss=0.2251,local_loss: 0.08125060498304018,global_loss: 0.1438918113708496\n",
      "Step 2: BSNE_Loss=0.3437,local_loss: 0.05990361256861928,global_loss: 0.28376439213752747\n",
      "Step 3: BSNE_Loss=0.1655,local_loss: 0.07202514455763866,global_loss: 0.09345010668039322\n",
      "Step 4: BSNE_Loss=0.3629,local_loss: 0.09771202882497486,global_loss: 0.2652203440666199\n",
      "Step 5: BSNE_Loss=0.1670,local_loss: 0.10067234660248574,global_loss: 0.06628930568695068\n",
      "Step 6: BSNE_Loss=0.1801,local_loss: 0.08615004129057868,global_loss: 0.09397155791521072\n",
      "Step 7: BSNE_Loss=0.3465,local_loss: 0.07642092727228735,global_loss: 0.27004510164260864\n",
      "Step 8: BSNE_Loss=0.2815,local_loss: 0.09104775712225158,global_loss: 0.19048373401165009\n",
      "Step 9: BSNE_Loss=0.5744,local_loss: 0.07376851593607545,global_loss: 0.5006580948829651\n",
      "Step 10: BSNE_Loss=0.3730,local_loss: 0.06007172289547677,global_loss: 0.3129511773586273\n",
      "Step 11: BSNE_Loss=0.1427,local_loss: 0.0941306424740627,global_loss: 0.048576198518276215\n",
      "Step 12: BSNE_Loss=0.0343,local_loss: 0.10058311042766904,global_loss: -0.06627074629068375\n",
      "Center node 7964 finished at step 13 with loss 0.0343\n",
      "\n",
      "=== Pretraining Epoch 3 (Center Node: 7312) ===\n",
      "Step 0: BSNE_Loss=0.5433,local_loss: 0.028442431094660364,global_loss: 0.5148921012878418\n",
      "Step 1: BSNE_Loss=0.6677,local_loss: 0.03843136068549524,global_loss: 0.6292589902877808\n",
      "Step 2: BSNE_Loss=0.5808,local_loss: 0.029921855983164294,global_loss: 0.5508407354354858\n",
      "Step 3: BSNE_Loss=0.5594,local_loss: 0.032379220559898655,global_loss: 0.5269870758056641\n",
      "Step 4: BSNE_Loss=0.7126,local_loss: 0.030698610811507952,global_loss: 0.6819376349449158\n",
      "Step 5: BSNE_Loss=0.6530,local_loss: 0.023217764805569533,global_loss: 0.629814088344574\n",
      "Step 6: BSNE_Loss=0.6524,local_loss: 0.01523574622562658,global_loss: 0.637115478515625\n",
      "Step 7: BSNE_Loss=0.6073,local_loss: 0.022759278039545253,global_loss: 0.5845468640327454\n",
      "Step 8: BSNE_Loss=0.4625,local_loss: 0.035076242710166086,global_loss: 0.4274648427963257\n",
      "Step 9: BSNE_Loss=0.3112,local_loss: 0.05076887815582317,global_loss: 0.2604409158229828\n",
      "Step 10: BSNE_Loss=0.2823,local_loss: 0.052452213402022,global_loss: 0.22980996966362\n",
      "Step 11: BSNE_Loss=0.4467,local_loss: 0.06361868896539195,global_loss: 0.38312384486198425\n",
      "Step 12: BSNE_Loss=0.3737,local_loss: 0.04507989362997922,global_loss: 0.3285968005657196\n",
      "Step 13: BSNE_Loss=0.3765,local_loss: 0.04725908500150981,global_loss: 0.32927998900413513\n",
      "Step 14: BSNE_Loss=0.4213,local_loss: 0.0631703589425737,global_loss: 0.35817286372184753\n",
      "Step 15: BSNE_Loss=0.3723,local_loss: 0.051726900402953746,global_loss: 0.320546418428421\n",
      "Step 16: BSNE_Loss=0.4485,local_loss: 0.04577104528845932,global_loss: 0.4027526080608368\n",
      "Step 17: BSNE_Loss=0.2547,local_loss: 0.06420343926354688,global_loss: 0.19048678874969482\n",
      "Step 18: BSNE_Loss=0.3064,local_loss: 0.05039944433597324,global_loss: 0.2559966444969177\n",
      "Step 19: BSNE_Loss=0.4732,local_loss: 0.04451992110294983,global_loss: 0.4287300705909729\n",
      "Step 20: BSNE_Loss=0.4708,local_loss: 0.05067294146392159,global_loss: 0.4201661944389343\n",
      "Step 21: BSNE_Loss=0.3698,local_loss: 0.04891457401987311,global_loss: 0.3208560049533844\n",
      "Step 22: BSNE_Loss=0.4167,local_loss: 0.058109901414152274,global_loss: 0.3585624098777771\n",
      "Step 23: BSNE_Loss=0.5259,local_loss: 0.03993078085381563,global_loss: 0.48597192764282227\n",
      "Step 24: BSNE_Loss=0.3886,local_loss: 0.04675351511264396,global_loss: 0.34188225865364075\n",
      "Step 25: BSNE_Loss=0.5039,local_loss: 0.034723912377551576,global_loss: 0.46917134523391724\n",
      "Step 26: BSNE_Loss=0.3839,local_loss: 0.048005242402933246,global_loss: 0.33590206503868103\n",
      "Step 27: BSNE_Loss=0.0840,local_loss: 0.07383235435506504,global_loss: 0.010131951421499252\n",
      "Step 28: BSNE_Loss=0.2170,local_loss: 0.06498851610541599,global_loss: 0.15200738608837128\n",
      "Step 29: BSNE_Loss=0.2928,local_loss: 0.05114998832585724,global_loss: 0.24160002171993256\n",
      "Step 30: BSNE_Loss=0.4407,local_loss: 0.05034197535518869,global_loss: 0.3903779983520508\n",
      "Step 31: BSNE_Loss=0.3707,local_loss: 0.07000357372470903,global_loss: 0.30073004961013794\n",
      "Step 32: BSNE_Loss=0.1250,local_loss: 0.07248479833363616,global_loss: 0.052539534866809845\n",
      "Step 33: BSNE_Loss=0.3195,local_loss: 0.06297667526916992,global_loss: 0.2565423846244812\n",
      "Step 34: BSNE_Loss=0.3011,local_loss: 0.05186954697334688,global_loss: 0.24921296536922455\n",
      "Step 35: BSNE_Loss=0.2064,local_loss: 0.06388170868629049,global_loss: 0.14249388873577118\n",
      "Step 36: BSNE_Loss=0.0675,local_loss: 0.08138301266032227,global_loss: -0.013931185938417912\n",
      "Step 37: BSNE_Loss=0.1292,local_loss: 0.0782456232283171,global_loss: 0.05094049870967865\n",
      "Step 38: BSNE_Loss=0.4180,local_loss: 0.05716374410830048,global_loss: 0.3608480989933014\n",
      "Step 39: BSNE_Loss=0.4111,local_loss: 0.05984830097028693,global_loss: 0.35129398107528687\n",
      "Step 40: BSNE_Loss=0.3416,local_loss: 0.0729472516525716,global_loss: 0.2686850428581238\n",
      "Step 41: BSNE_Loss=0.0368,local_loss: 0.0911273842844785,global_loss: -0.05428623408079147\n",
      "Center node 7312 finished at step 42 with loss 0.0368\n",
      "\n",
      "=== Pretraining Epoch 4 (Center Node: 6714) ===\n",
      "Step 0: BSNE_Loss=0.5208,local_loss: 0.04542336121529997,global_loss: 0.47533535957336426\n",
      "Step 1: BSNE_Loss=0.2997,local_loss: 0.05299782001245039,global_loss: 0.24673618376255035\n",
      "Step 2: BSNE_Loss=0.4032,local_loss: 0.032381348287433165,global_loss: 0.3708658516407013\n",
      "Step 3: BSNE_Loss=0.1485,local_loss: 0.06033297052703841,global_loss: 0.08818760514259338\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 4: BSNE_Loss=0.2862,local_loss: 0.04416435446616939,global_loss: 0.24204948544502258\n",
      "Step 5: BSNE_Loss=0.3949,local_loss: 0.052550175170348844,global_loss: 0.3423863649368286\n",
      "Step 6: BSNE_Loss=0.2165,local_loss: 0.06085164179244716,global_loss: 0.155660942196846\n",
      "Step 7: BSNE_Loss=0.2489,local_loss: 0.06771373258025931,global_loss: 0.18123620748519897\n",
      "Step 8: BSNE_Loss=0.0176,local_loss: 0.08920321840049632,global_loss: -0.07164160162210464\n",
      "Center node 6714 finished at step 9 with loss 0.0176\n",
      "\n",
      "=== Pretraining Epoch 5 (Center Node: 7617) ===\n",
      "Step 0: BSNE_Loss=0.3123,local_loss: 0.0546999340215638,global_loss: 0.25759872794151306\n",
      "Step 1: BSNE_Loss=0.3193,local_loss: 0.06739339930588674,global_loss: 0.2518882751464844\n",
      "Step 2: BSNE_Loss=0.1128,local_loss: 0.06670706466806083,global_loss: 0.046050459146499634\n",
      "Step 3: BSNE_Loss=0.2116,local_loss: 0.0812954010200393,global_loss: 0.13032770156860352\n",
      "Step 4: BSNE_Loss=0.0015,local_loss: 0.07114437046077546,global_loss: -0.06960134208202362\n",
      "Center node 7617 finished at step 5 with loss 0.0015\n",
      "\n",
      "=== Pretraining Epoch 6 (Center Node: 297) ===\n",
      "Step 0: BSNE_Loss=0.2838,local_loss: 0.03507777957407393,global_loss: 0.24867941439151764\n",
      "Step 1: BSNE_Loss=0.0854,local_loss: 0.050926743678207306,global_loss: 0.034431569278240204\n",
      "Step 2: BSNE_Loss=0.1988,local_loss: 0.042657267894305344,global_loss: 0.15610955655574799\n",
      "Step 3: BSNE_Loss=0.1434,local_loss: 0.047054837440201176,global_loss: 0.09637828916311264\n",
      "Step 4: BSNE_Loss=0.0534,local_loss: 0.06784978688944589,global_loss: -0.01443871483206749\n",
      "Step 5: BSNE_Loss=0.0946,local_loss: 0.055792097946638225,global_loss: 0.03879762440919876\n",
      "Step 6: BSNE_Loss=0.2123,local_loss: 0.034359615362072285,global_loss: 0.17795267701148987\n",
      "Step 7: BSNE_Loss=0.1537,local_loss: 0.038240406671419244,global_loss: 0.11548341810703278\n",
      "Step 8: BSNE_Loss=0.1796,local_loss: 0.019835552828983864,global_loss: 0.15974685549736023\n",
      "Step 9: BSNE_Loss=0.2629,local_loss: 0.029850822580835233,global_loss: 0.23300175368785858\n",
      "Step 10: BSNE_Loss=0.2913,local_loss: 0.04414939195713115,global_loss: 0.24713419377803802\n",
      "Step 11: BSNE_Loss=0.4382,local_loss: 0.042053025284496684,global_loss: 0.39614415168762207\n",
      "Step 12: BSNE_Loss=-0.0136,local_loss: 0.050912700579801864,global_loss: -0.06451880931854248\n",
      "Center node 297 finished at step 13 with loss -0.0136\n",
      "\n",
      "=== Pretraining Epoch 7 (Center Node: 10280) ===\n",
      "Step 0: BSNE_Loss=0.2360,local_loss: 0.037656534354977685,global_loss: 0.198308527469635\n",
      "Step 1: BSNE_Loss=0.2852,local_loss: 0.025327740247843008,global_loss: 0.259906142950058\n",
      "Step 2: BSNE_Loss=0.3130,local_loss: 0.03483901013741314,global_loss: 0.27818891406059265\n",
      "Step 3: BSNE_Loss=0.1508,local_loss: 0.050035974594639286,global_loss: 0.10072517395019531\n",
      "Step 4: BSNE_Loss=0.1450,local_loss: 0.020336269893542723,global_loss: 0.12462718039751053\n",
      "Step 5: BSNE_Loss=0.2279,local_loss: 0.030129981067142556,global_loss: 0.19779160618782043\n",
      "Step 6: BSNE_Loss=0.1698,local_loss: 0.03477147166133049,global_loss: 0.13507317006587982\n",
      "Step 7: BSNE_Loss=0.2563,local_loss: 0.05298843692764522,global_loss: 0.20333880186080933\n",
      "Step 8: BSNE_Loss=-0.1402,local_loss: 0.05873283718095473,global_loss: -0.19894054532051086\n",
      "Center node 10280 finished at step 9 with loss -0.1402\n",
      "\n",
      "=== Pretraining Epoch 8 (Center Node: 4189) ===\n",
      "Step 0: BSNE_Loss=0.1714,local_loss: 0.05374490045159187,global_loss: 0.11766233295202255\n",
      "Step 1: BSNE_Loss=0.3405,local_loss: 0.0788291426872085,global_loss: 0.2616881728172302\n",
      "Step 2: BSNE_Loss=0.0975,local_loss: 0.05322453204748043,global_loss: 0.044297657907009125\n",
      "Step 3: BSNE_Loss=0.0719,local_loss: 0.06526872728284562,global_loss: 0.006604628171771765\n",
      "Step 4: BSNE_Loss=0.0464,local_loss: 0.06894512950183387,global_loss: -0.02252357453107834\n",
      "Center node 4189 finished at step 5 with loss 0.0464\n",
      "\n",
      "=== Pretraining Epoch 9 (Center Node: 3682) ===\n",
      "Step 0: BSNE_Loss=0.6027,local_loss: 0.0382501311807726,global_loss: 0.5644389390945435\n",
      "Step 1: BSNE_Loss=0.7971,local_loss: 0.03334782198861299,global_loss: 0.7637317180633545\n",
      "Step 2: BSNE_Loss=0.5510,local_loss: 0.04491162659661688,global_loss: 0.5060948729515076\n",
      "Step 3: BSNE_Loss=0.8020,local_loss: 0.0339983402929397,global_loss: 0.767968475818634\n",
      "Step 4: BSNE_Loss=0.8223,local_loss: 0.040283191443612004,global_loss: 0.7820603847503662\n",
      "Step 5: BSNE_Loss=0.6553,local_loss: 0.03765189634008402,global_loss: 0.6175987124443054\n",
      "Step 6: BSNE_Loss=0.6794,local_loss: 0.042538964359216835,global_loss: 0.6368323564529419\n",
      "Step 7: BSNE_Loss=0.7181,local_loss: 0.03143395093775857,global_loss: 0.6866681575775146\n",
      "Step 8: BSNE_Loss=0.5455,local_loss: 0.04240775749601265,global_loss: 0.5030441880226135\n",
      "Step 9: BSNE_Loss=0.6962,local_loss: 0.030901268193260726,global_loss: 0.6652507185935974\n",
      "Step 10: BSNE_Loss=0.6904,local_loss: 0.03672860083741911,global_loss: 0.6536573767662048\n",
      "Step 11: BSNE_Loss=0.9383,local_loss: 0.038175555002409,global_loss: 0.9000849723815918\n",
      "Step 12: BSNE_Loss=0.6574,local_loss: 0.03082766429152644,global_loss: 0.6266161799430847\n",
      "Step 13: BSNE_Loss=0.4914,local_loss: 0.03494343705182324,global_loss: 0.4564226269721985\n",
      "Step 14: BSNE_Loss=0.6930,local_loss: 0.035456221892983766,global_loss: 0.657552182674408\n",
      "Step 15: BSNE_Loss=0.7034,local_loss: 0.03800058448502808,global_loss: 0.6654376983642578\n",
      "Step 16: BSNE_Loss=0.7351,local_loss: 0.031177927699432773,global_loss: 0.7039209008216858\n",
      "Step 17: BSNE_Loss=0.7208,local_loss: 0.03863314279857718,global_loss: 0.6822025775909424\n",
      "Step 18: BSNE_Loss=0.6468,local_loss: 0.0430233785902855,global_loss: 0.6037489175796509\n",
      "Step 19: BSNE_Loss=0.3042,local_loss: 0.038755946813807916,global_loss: 0.2654072344303131\n",
      "Step 20: BSNE_Loss=0.7573,local_loss: 0.03516496473846332,global_loss: 0.7221822142601013\n",
      "Step 21: BSNE_Loss=0.5624,local_loss: 0.03136748053302531,global_loss: 0.5309950113296509\n",
      "Step 22: BSNE_Loss=0.4085,local_loss: 0.033982981860525036,global_loss: 0.37453708052635193\n",
      "Step 23: BSNE_Loss=0.3554,local_loss: 0.03047314658115892,global_loss: 0.3248950242996216\n",
      "Step 24: BSNE_Loss=0.5854,local_loss: 0.039891181381372716,global_loss: 0.5454815626144409\n",
      "Step 25: BSNE_Loss=0.6602,local_loss: 0.04541821843918466,global_loss: 0.6147610545158386\n",
      "Step 26: BSNE_Loss=0.3384,local_loss: 0.037890574000316965,global_loss: 0.3005393147468567\n",
      "Step 27: BSNE_Loss=0.3717,local_loss: 0.03708666377657842,global_loss: 0.3346242308616638\n",
      "Step 28: BSNE_Loss=0.8102,local_loss: 0.03174311235435387,global_loss: 0.7784637212753296\n",
      "Step 29: BSNE_Loss=0.7894,local_loss: 0.028214338470628222,global_loss: 0.7611494064331055\n",
      "Step 30: BSNE_Loss=0.4771,local_loss: 0.03533356587013048,global_loss: 0.4417426288127899\n",
      "Step 31: BSNE_Loss=0.3556,local_loss: 0.033232610827761404,global_loss: 0.32241058349609375\n",
      "Step 32: BSNE_Loss=0.5339,local_loss: 0.030343770860217223,global_loss: 0.5035773515701294\n",
      "Step 33: BSNE_Loss=0.4799,local_loss: 0.04210581665372681,global_loss: 0.4378058910369873\n",
      "Step 34: BSNE_Loss=0.7146,local_loss: 0.039852432194795576,global_loss: 0.6747233867645264\n",
      "Step 35: BSNE_Loss=0.4858,local_loss: 0.030185929744229845,global_loss: 0.45561087131500244\n",
      "Step 36: BSNE_Loss=0.8004,local_loss: 0.032081366955174946,global_loss: 0.7682983875274658\n",
      "Step 37: BSNE_Loss=0.7360,local_loss: 0.04237589952020116,global_loss: 0.6935794353485107\n",
      "Step 38: BSNE_Loss=0.4528,local_loss: 0.04082083326839866,global_loss: 0.4119519889354706\n",
      "Step 39: BSNE_Loss=0.3252,local_loss: 0.039601295903630165,global_loss: 0.28559961915016174\n",
      "Step 40: BSNE_Loss=0.1583,local_loss: 0.03832325995437528,global_loss: 0.11998032033443451\n",
      "Step 41: BSNE_Loss=0.3771,local_loss: 0.03603283914236957,global_loss: 0.3410273790359497\n",
      "Step 42: BSNE_Loss=0.4464,local_loss: 0.03471592070431982,global_loss: 0.4116404354572296\n",
      "Step 43: BSNE_Loss=0.5220,local_loss: 0.02988369142652248,global_loss: 0.49214407801628113\n",
      "Step 44: BSNE_Loss=0.7493,local_loss: 0.029375449149877452,global_loss: 0.7199134826660156\n",
      "Step 45: BSNE_Loss=0.8248,local_loss: 0.03317466966945786,global_loss: 0.7916730046272278\n",
      "Step 46: BSNE_Loss=0.6314,local_loss: 0.030583871589817,global_loss: 0.600776195526123\n",
      "Step 47: BSNE_Loss=0.5979,local_loss: 0.03701460015355608,global_loss: 0.5608355402946472\n",
      "Step 48: BSNE_Loss=0.7118,local_loss: 0.04058272324200692,global_loss: 0.6712647676467896\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 49: BSNE_Loss=0.3626,local_loss: 0.03232273492496058,global_loss: 0.3302575647830963\n",
      "Step 50: BSNE_Loss=0.7278,local_loss: 0.03145245273660322,global_loss: 0.6963790655136108\n",
      "Step 51: BSNE_Loss=0.4152,local_loss: 0.026182886974274986,global_loss: 0.389010488986969\n",
      "Step 52: BSNE_Loss=0.4606,local_loss: 0.03921346290451112,global_loss: 0.42136266827583313\n",
      "Step 53: BSNE_Loss=0.3923,local_loss: 0.030513137936882408,global_loss: 0.361805260181427\n",
      "Step 54: BSNE_Loss=0.4227,local_loss: 0.039731659407105796,global_loss: 0.38298115134239197\n",
      "Step 55: BSNE_Loss=0.4591,local_loss: 0.029475251813213574,global_loss: 0.4295842945575714\n",
      "Step 56: BSNE_Loss=0.3535,local_loss: 0.04171184798590819,global_loss: 0.3117947280406952\n",
      "Step 57: BSNE_Loss=0.7806,local_loss: 0.025759234911218544,global_loss: 0.7548452019691467\n",
      "Step 58: BSNE_Loss=0.5948,local_loss: 0.029376138543791813,global_loss: 0.5654288530349731\n",
      "Step 59: BSNE_Loss=0.5737,local_loss: 0.0356586162094355,global_loss: 0.5379970073699951\n",
      "Step 60: BSNE_Loss=0.4351,local_loss: 0.029897420315860325,global_loss: 0.40524184703826904\n",
      "Step 61: BSNE_Loss=0.7983,local_loss: 0.02523828356758922,global_loss: 0.7730392813682556\n",
      "Step 62: BSNE_Loss=0.7472,local_loss: 0.0327175208950155,global_loss: 0.7144536375999451\n",
      "Step 63: BSNE_Loss=0.4846,local_loss: 0.03778586544145806,global_loss: 0.4467712938785553\n",
      "Step 64: BSNE_Loss=0.3656,local_loss: 0.031822321922430175,global_loss: 0.3337850272655487\n",
      "Step 65: BSNE_Loss=0.6604,local_loss: 0.030489079618377726,global_loss: 0.6298853754997253\n",
      "Step 66: BSNE_Loss=0.4774,local_loss: 0.033214004113140155,global_loss: 0.4441397786140442\n",
      "Step 67: BSNE_Loss=0.5412,local_loss: 0.03004219187736217,global_loss: 0.5112001299858093\n",
      "Step 68: BSNE_Loss=0.7065,local_loss: 0.029728295101759457,global_loss: 0.6768211722373962\n",
      "Step 69: BSNE_Loss=0.6840,local_loss: 0.033080258213795025,global_loss: 0.6508997082710266\n",
      "Step 70: BSNE_Loss=0.6004,local_loss: 0.04340254698727834,global_loss: 0.5570317506790161\n",
      "Step 71: BSNE_Loss=0.4073,local_loss: 0.038356228174511335,global_loss: 0.3689400255680084\n",
      "Step 72: BSNE_Loss=0.5750,local_loss: 0.03940923135958267,global_loss: 0.5356338620185852\n",
      "Step 73: BSNE_Loss=0.5499,local_loss: 0.03598245559316985,global_loss: 0.513940691947937\n",
      "Step 74: BSNE_Loss=0.5730,local_loss: 0.0367859557247691,global_loss: 0.5362502932548523\n",
      "Step 75: BSNE_Loss=0.6490,local_loss: 0.035938138646460484,global_loss: 0.6130632758140564\n",
      "Step 76: BSNE_Loss=0.5889,local_loss: 0.030785927755138512,global_loss: 0.558122456073761\n",
      "Step 77: BSNE_Loss=0.5200,local_loss: 0.0348646598509359,global_loss: 0.48517411947250366\n",
      "Step 78: BSNE_Loss=0.5175,local_loss: 0.03447784744775326,global_loss: 0.4830184876918793\n",
      "Step 79: BSNE_Loss=0.3201,local_loss: 0.037679540636345654,global_loss: 0.2823743522167206\n",
      "Step 80: BSNE_Loss=0.6341,local_loss: 0.03315637334945144,global_loss: 0.6009566187858582\n",
      "Step 81: BSNE_Loss=0.5770,local_loss: 0.0338891535679491,global_loss: 0.5431064367294312\n",
      "Step 82: BSNE_Loss=0.4305,local_loss: 0.02792086924668113,global_loss: 0.40257591009140015\n",
      "Step 83: BSNE_Loss=0.7643,local_loss: 0.03862052612087795,global_loss: 0.7256819009780884\n",
      "Step 84: BSNE_Loss=0.4931,local_loss: 0.03970735737006758,global_loss: 0.4534178376197815\n",
      "Step 85: BSNE_Loss=0.6447,local_loss: 0.03686650359466449,global_loss: 0.6077931523323059\n",
      "Step 86: BSNE_Loss=0.7066,local_loss: 0.02676072429460604,global_loss: 0.6798839569091797\n",
      "Step 87: BSNE_Loss=0.4796,local_loss: 0.031218177101755874,global_loss: 0.44842150807380676\n",
      "Step 88: BSNE_Loss=0.5112,local_loss: 0.03826533657166267,global_loss: 0.47288745641708374\n",
      "Step 89: BSNE_Loss=0.3379,local_loss: 0.037654220464325275,global_loss: 0.3002780079841614\n",
      "Step 90: BSNE_Loss=0.6018,local_loss: 0.03544333810700098,global_loss: 0.5663875937461853\n",
      "Step 91: BSNE_Loss=0.4926,local_loss: 0.03209437484612613,global_loss: 0.4604605436325073\n",
      "Step 92: BSNE_Loss=0.6999,local_loss: 0.03766456314531136,global_loss: 0.6621910333633423\n",
      "Step 93: BSNE_Loss=0.6622,local_loss: 0.028362116958322484,global_loss: 0.6338703632354736\n",
      "Step 94: BSNE_Loss=0.5965,local_loss: 0.03263576446724578,global_loss: 0.5638984441757202\n",
      "Step 95: BSNE_Loss=0.5153,local_loss: 0.036330425593478405,global_loss: 0.47892314195632935\n",
      "Step 96: BSNE_Loss=0.5970,local_loss: 0.03034468159974621,global_loss: 0.5666083693504333\n",
      "Step 97: BSNE_Loss=0.3862,local_loss: 0.03016128009980858,global_loss: 0.3560578525066376\n",
      "Step 98: BSNE_Loss=0.4358,local_loss: 0.03527005031228966,global_loss: 0.4005032479763031\n",
      "Step 99: BSNE_Loss=0.6701,local_loss: 0.02865873053518255,global_loss: 0.641409158706665\n",
      "Step 100: BSNE_Loss=0.3815,local_loss: 0.028210006975260167,global_loss: 0.35330355167388916\n",
      "Step 101: BSNE_Loss=0.4000,local_loss: 0.030625584638101364,global_loss: 0.36935892701148987\n",
      "Step 102: BSNE_Loss=0.3340,local_loss: 0.02617330006545744,global_loss: 0.3078446388244629\n",
      "Step 103: BSNE_Loss=0.4544,local_loss: 0.03179947618584289,global_loss: 0.4226419925689697\n",
      "Step 104: BSNE_Loss=0.5587,local_loss: 0.02939835490120296,global_loss: 0.5293214321136475\n",
      "Step 105: BSNE_Loss=0.5170,local_loss: 0.03237030449128739,global_loss: 0.4846547245979309\n",
      "Step 106: BSNE_Loss=0.5046,local_loss: 0.02999100378978546,global_loss: 0.4745609164237976\n",
      "Step 107: BSNE_Loss=0.4900,local_loss: 0.034312102386246955,global_loss: 0.45570725202560425\n",
      "Step 108: BSNE_Loss=0.5880,local_loss: 0.041292028133658884,global_loss: 0.5466788411140442\n",
      "Step 109: BSNE_Loss=0.6268,local_loss: 0.03709569147922987,global_loss: 0.5896952152252197\n",
      "Step 110: BSNE_Loss=0.5571,local_loss: 0.035170099126783025,global_loss: 0.5218881964683533\n",
      "Step 111: BSNE_Loss=0.3142,local_loss: 0.029856723198627623,global_loss: 0.2843908965587616\n",
      "Step 112: BSNE_Loss=0.3831,local_loss: 0.03626026633996984,global_loss: 0.34686771035194397\n",
      "Step 113: BSNE_Loss=0.5531,local_loss: 0.032825232145715165,global_loss: 0.5203167796134949\n",
      "Step 114: BSNE_Loss=0.5352,local_loss: 0.043079952973908126,global_loss: 0.4920862317085266\n",
      "Step 115: BSNE_Loss=0.4435,local_loss: 0.040112421197614295,global_loss: 0.4033908545970917\n",
      "Step 116: BSNE_Loss=0.3234,local_loss: 0.03087307788827187,global_loss: 0.29254046082496643\n",
      "Step 117: BSNE_Loss=0.6032,local_loss: 0.037569720961894246,global_loss: 0.5655820369720459\n",
      "Step 118: BSNE_Loss=0.5211,local_loss: 0.04085959645532789,global_loss: 0.4802235960960388\n",
      "Step 119: BSNE_Loss=0.4642,local_loss: 0.040102747443595585,global_loss: 0.42406657338142395\n",
      "Step 120: BSNE_Loss=0.6266,local_loss: 0.03389058880511366,global_loss: 0.5927530527114868\n",
      "Step 121: BSNE_Loss=0.4055,local_loss: 0.03423606623668385,global_loss: 0.37121856212615967\n",
      "Step 122: BSNE_Loss=0.5351,local_loss: 0.03385786541247747,global_loss: 0.5012418031692505\n",
      "Step 123: BSNE_Loss=0.4218,local_loss: 0.03518697158956125,global_loss: 0.3865880072116852\n",
      "Step 124: BSNE_Loss=0.4854,local_loss: 0.03140266632887178,global_loss: 0.4540416896343231\n",
      "Step 125: BSNE_Loss=0.4428,local_loss: 0.03809885806171999,global_loss: 0.40467920899391174\n",
      "Step 126: BSNE_Loss=0.4920,local_loss: 0.047586125030618855,global_loss: 0.44446176290512085\n",
      "Step 127: BSNE_Loss=0.3104,local_loss: 0.030907349206139474,global_loss: 0.27950412034988403\n",
      "Step 128: BSNE_Loss=0.4791,local_loss: 0.052737972270103906,global_loss: 0.42635807394981384\n",
      "Step 129: BSNE_Loss=0.4417,local_loss: 0.037578587867150424,global_loss: 0.40409326553344727\n",
      "Step 130: BSNE_Loss=0.5549,local_loss: 0.053418896561740474,global_loss: 0.5014801621437073\n",
      "Step 131: BSNE_Loss=0.4793,local_loss: 0.04358828623489728,global_loss: 0.43569308519363403\n",
      "Step 132: BSNE_Loss=0.3208,local_loss: 0.03478818332642651,global_loss: 0.2860546410083771\n",
      "Step 133: BSNE_Loss=0.4891,local_loss: 0.03661843226295242,global_loss: 0.4524657130241394\n",
      "Step 134: BSNE_Loss=0.4890,local_loss: 0.04847186078736803,global_loss: 0.4405689537525177\n",
      "Step 135: BSNE_Loss=0.5214,local_loss: 0.04788202361906504,global_loss: 0.4735603928565979\n",
      "Step 136: BSNE_Loss=0.3715,local_loss: 0.04566304914862865,global_loss: 0.3258702754974365\n",
      "Step 137: BSNE_Loss=0.4496,local_loss: 0.048871787100758825,global_loss: 0.4007556736469269\n",
      "Step 138: BSNE_Loss=0.3460,local_loss: 0.03849239358426078,global_loss: 0.3074638545513153\n",
      "Step 139: BSNE_Loss=0.4782,local_loss: 0.048063592061569925,global_loss: 0.43014517426490784\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 140: BSNE_Loss=0.5251,local_loss: 0.04593631436094874,global_loss: 0.4792029857635498\n",
      "Step 141: BSNE_Loss=0.4310,local_loss: 0.040473132174341744,global_loss: 0.39055967330932617\n",
      "Step 142: BSNE_Loss=0.4435,local_loss: 0.04874932070998054,global_loss: 0.39472031593322754\n",
      "Step 143: BSNE_Loss=0.3865,local_loss: 0.05323373509832109,global_loss: 0.33330535888671875\n",
      "Step 144: BSNE_Loss=0.4452,local_loss: 0.048330107623794104,global_loss: 0.39686304330825806\n",
      "Step 145: BSNE_Loss=0.3849,local_loss: 0.05417609835221661,global_loss: 0.33068329095840454\n",
      "Step 146: BSNE_Loss=0.3884,local_loss: 0.05497414093391703,global_loss: 0.3334556818008423\n",
      "Step 147: BSNE_Loss=0.3672,local_loss: 0.04836133778265092,global_loss: 0.31887179613113403\n",
      "Step 148: BSNE_Loss=0.6002,local_loss: 0.06030693751419017,global_loss: 0.5399222373962402\n",
      "Step 149: BSNE_Loss=0.3783,local_loss: 0.05524413873041711,global_loss: 0.32307106256484985\n",
      "Step 150: BSNE_Loss=0.4621,local_loss: 0.05922119695160117,global_loss: 0.40292447805404663\n",
      "Step 151: BSNE_Loss=0.3804,local_loss: 0.06718678535350256,global_loss: 0.3132208585739136\n",
      "Step 152: BSNE_Loss=0.3168,local_loss: 0.045554518691714604,global_loss: 0.2712097465991974\n",
      "Step 153: BSNE_Loss=0.4322,local_loss: 0.07049511093309563,global_loss: 0.361666202545166\n",
      "Step 154: BSNE_Loss=0.3907,local_loss: 0.05951515898138323,global_loss: 0.33116886019706726\n",
      "Step 155: BSNE_Loss=0.3835,local_loss: 0.06411050714229394,global_loss: 0.31937092542648315\n",
      "Step 156: BSNE_Loss=0.2871,local_loss: 0.07107574308068632,global_loss: 0.2160610556602478\n",
      "Step 157: BSNE_Loss=0.2143,local_loss: 0.05033953742336464,global_loss: 0.1640065759420395\n",
      "Step 158: BSNE_Loss=0.4919,local_loss: 0.07583018369979815,global_loss: 0.41609078645706177\n",
      "Step 159: BSNE_Loss=0.3018,local_loss: 0.05266795669931716,global_loss: 0.24917906522750854\n",
      "Step 160: BSNE_Loss=0.4649,local_loss: 0.07475683425974697,global_loss: 0.3901306688785553\n",
      "Step 161: BSNE_Loss=0.4584,local_loss: 0.06501711069852054,global_loss: 0.393372505903244\n",
      "Step 162: BSNE_Loss=0.4320,local_loss: 0.07086729531285982,global_loss: 0.3611777722835541\n",
      "Step 163: BSNE_Loss=0.5006,local_loss: 0.045721792281548254,global_loss: 0.45488065481185913\n",
      "Step 164: BSNE_Loss=0.4129,local_loss: 0.07354616517587807,global_loss: 0.3393872380256653\n",
      "Step 165: BSNE_Loss=0.3420,local_loss: 0.056127995753108804,global_loss: 0.2858288586139679\n",
      "Step 166: BSNE_Loss=0.3608,local_loss: 0.06150192122771633,global_loss: 0.2993321716785431\n",
      "Step 167: BSNE_Loss=0.4280,local_loss: 0.07400548154457168,global_loss: 0.3539535105228424\n",
      "Step 168: BSNE_Loss=0.3381,local_loss: 0.06825678353306322,global_loss: 0.26984861493110657\n",
      "Step 169: BSNE_Loss=0.3872,local_loss: 0.0758383722967472,global_loss: 0.31133851408958435\n",
      "Step 170: BSNE_Loss=0.4726,local_loss: 0.07844067634111915,global_loss: 0.3941516578197479\n",
      "Step 171: BSNE_Loss=0.4174,local_loss: 0.06210086463146331,global_loss: 0.35529330372810364\n",
      "Step 172: BSNE_Loss=0.3188,local_loss: 0.09118597854275198,global_loss: 0.22763128578662872\n",
      "Step 173: BSNE_Loss=0.4260,local_loss: 0.07728903305032701,global_loss: 0.34868282079696655\n",
      "Step 174: BSNE_Loss=0.3538,local_loss: 0.08091354900827163,global_loss: 0.2729286849498749\n",
      "Step 175: BSNE_Loss=0.4221,local_loss: 0.07240233347499467,global_loss: 0.3496928811073303\n",
      "Step 176: BSNE_Loss=0.3992,local_loss: 0.09026154365674881,global_loss: 0.30890536308288574\n",
      "Step 177: BSNE_Loss=0.3161,local_loss: 0.07347994905087925,global_loss: 0.24258622527122498\n",
      "Step 178: BSNE_Loss=0.3739,local_loss: 0.06318478393485255,global_loss: 0.31069138646125793\n",
      "Step 179: BSNE_Loss=0.3400,local_loss: 0.09724581773363274,global_loss: 0.24279122054576874\n",
      "Step 180: BSNE_Loss=0.2253,local_loss: 0.0855509015997454,global_loss: 0.13972336053848267\n",
      "Step 181: BSNE_Loss=0.3338,local_loss: 0.07021626198038583,global_loss: 0.2635765075683594\n",
      "Step 182: BSNE_Loss=0.1268,local_loss: 0.07503628330002149,global_loss: 0.05177871137857437\n",
      "Step 183: BSNE_Loss=0.3206,local_loss: 0.09275509133808989,global_loss: 0.2278800904750824\n",
      "Step 184: BSNE_Loss=0.2872,local_loss: 0.09067024649641897,global_loss: 0.196485698223114\n",
      "Step 185: BSNE_Loss=0.3879,local_loss: 0.07567262590136636,global_loss: 0.31221094727516174\n",
      "Step 186: BSNE_Loss=0.2813,local_loss: 0.09807438275666616,global_loss: 0.18319422006607056\n",
      "Step 187: BSNE_Loss=0.1731,local_loss: 0.08450846849039224,global_loss: 0.08862246572971344\n",
      "Step 188: BSNE_Loss=0.3578,local_loss: 0.08892129411556077,global_loss: 0.26887181401252747\n",
      "Step 189: BSNE_Loss=0.3169,local_loss: 0.09003216862282999,global_loss: 0.2269050031900406\n",
      "Step 190: BSNE_Loss=0.3387,local_loss: 0.08489708464049177,global_loss: 0.25380590558052063\n",
      "Step 191: BSNE_Loss=0.2891,local_loss: 0.09331059521328858,global_loss: 0.19581788778305054\n",
      "Step 192: BSNE_Loss=0.2931,local_loss: 0.09282391572603885,global_loss: 0.20031823217868805\n",
      "Step 193: BSNE_Loss=0.2987,local_loss: 0.09380411390353868,global_loss: 0.20493896305561066\n",
      "Step 194: BSNE_Loss=0.2188,local_loss: 0.10280078723927343,global_loss: 0.11604350805282593\n",
      "Step 195: BSNE_Loss=0.1530,local_loss: 0.09045568691149164,global_loss: 0.06255792826414108\n",
      "Step 196: BSNE_Loss=0.2683,local_loss: 0.08117246660909865,global_loss: 0.18714794516563416\n",
      "Step 197: BSNE_Loss=0.1874,local_loss: 0.0772626118872081,global_loss: 0.11013229191303253\n",
      "Step 198: BSNE_Loss=0.3305,local_loss: 0.09727121201187376,global_loss: 0.23324866592884064\n",
      "Step 199: BSNE_Loss=0.2274,local_loss: 0.10659555714910689,global_loss: 0.12079021334648132\n",
      "Center node 3682 finished at step 200 with loss 0.2274\n",
      "\n",
      "=== Pretraining Epoch 10 (Center Node: 9729) ===\n",
      "Step 0: BSNE_Loss=0.4337,local_loss: 0.021914370185942535,global_loss: 0.4118213653564453\n",
      "Step 1: BSNE_Loss=0.5993,local_loss: 0.026722702853903175,global_loss: 0.5725748538970947\n",
      "Step 2: BSNE_Loss=0.5640,local_loss: 0.021197614624965826,global_loss: 0.5427683591842651\n",
      "Step 3: BSNE_Loss=0.6239,local_loss: 0.02635781305505237,global_loss: 0.5975794196128845\n",
      "Step 4: BSNE_Loss=0.6350,local_loss: 0.023445364239093484,global_loss: 0.6115526556968689\n",
      "Step 5: BSNE_Loss=0.3724,local_loss: 0.024779885195577885,global_loss: 0.3476080894470215\n",
      "Step 6: BSNE_Loss=0.5701,local_loss: 0.033470229958049706,global_loss: 0.5365999937057495\n",
      "Step 7: BSNE_Loss=0.5672,local_loss: 0.01585190943410862,global_loss: 0.5513799786567688\n",
      "Step 8: BSNE_Loss=0.7787,local_loss: 0.02817121217417666,global_loss: 0.7505195140838623\n",
      "Step 9: BSNE_Loss=0.5645,local_loss: 0.028786378785791277,global_loss: 0.5356771349906921\n",
      "Step 10: BSNE_Loss=0.4364,local_loss: 0.028611737979674246,global_loss: 0.407808393239975\n",
      "Step 11: BSNE_Loss=0.4432,local_loss: 0.02860883388300992,global_loss: 0.41462740302085876\n",
      "Step 12: BSNE_Loss=0.5462,local_loss: 0.02918850133087681,global_loss: 0.517033576965332\n",
      "Step 13: BSNE_Loss=0.5033,local_loss: 0.027464833027424237,global_loss: 0.47584688663482666\n",
      "Step 14: BSNE_Loss=0.3311,local_loss: 0.02801054884065641,global_loss: 0.3030797839164734\n",
      "Step 15: BSNE_Loss=0.6074,local_loss: 0.027375265297112133,global_loss: 0.5800190567970276\n",
      "Step 16: BSNE_Loss=0.3687,local_loss: 0.028375996935331188,global_loss: 0.34035083651542664\n",
      "Step 17: BSNE_Loss=0.4069,local_loss: 0.029989463075091975,global_loss: 0.3768685460090637\n",
      "Step 18: BSNE_Loss=0.4595,local_loss: 0.027385268709710236,global_loss: 0.4321087598800659\n",
      "Step 19: BSNE_Loss=0.4019,local_loss: 0.028232101741753328,global_loss: 0.3736395239830017\n",
      "Step 20: BSNE_Loss=0.4077,local_loss: 0.030168523379539586,global_loss: 0.37754184007644653\n",
      "Step 21: BSNE_Loss=0.5215,local_loss: 0.027553093620181695,global_loss: 0.49393314123153687\n",
      "Step 22: BSNE_Loss=0.5371,local_loss: 0.03292089430187732,global_loss: 0.5041394829750061\n",
      "Step 23: BSNE_Loss=0.3938,local_loss: 0.030132151864196825,global_loss: 0.3636975586414337\n",
      "Step 24: BSNE_Loss=0.4058,local_loss: 0.029055953831625722,global_loss: 0.37675461173057556\n",
      "Step 25: BSNE_Loss=0.3782,local_loss: 0.02919897748541651,global_loss: 0.34899139404296875\n",
      "Step 26: BSNE_Loss=0.4302,local_loss: 0.03244713078565421,global_loss: 0.3977617025375366\n",
      "Step 27: BSNE_Loss=0.4048,local_loss: 0.030287868926567405,global_loss: 0.37447503209114075\n",
      "Step 28: BSNE_Loss=0.4025,local_loss: 0.03386757942607014,global_loss: 0.36867594718933105\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 29: BSNE_Loss=0.4601,local_loss: 0.03137044514064452,global_loss: 0.42873722314834595\n",
      "Step 30: BSNE_Loss=0.6048,local_loss: 0.027130437195079033,global_loss: 0.5776315331459045\n",
      "Step 31: BSNE_Loss=0.5499,local_loss: 0.02824717281640566,global_loss: 0.5216947197914124\n",
      "Step 32: BSNE_Loss=0.4536,local_loss: 0.02800255110380672,global_loss: 0.4255625009536743\n",
      "Step 33: BSNE_Loss=0.4221,local_loss: 0.03216901107713973,global_loss: 0.3899725675582886\n",
      "Step 34: BSNE_Loss=0.4773,local_loss: 0.033714152158093105,global_loss: 0.44361865520477295\n",
      "Step 35: BSNE_Loss=0.4173,local_loss: 0.033664608861353376,global_loss: 0.3835957646369934\n",
      "Step 36: BSNE_Loss=0.4325,local_loss: 0.033587370901167854,global_loss: 0.39886996150016785\n",
      "Step 37: BSNE_Loss=0.4204,local_loss: 0.03512793740347229,global_loss: 0.38531967997550964\n",
      "Step 38: BSNE_Loss=0.4520,local_loss: 0.036843170566827926,global_loss: 0.41510725021362305\n",
      "Step 39: BSNE_Loss=0.4421,local_loss: 0.035328685542225464,global_loss: 0.4067467451095581\n",
      "Step 40: BSNE_Loss=0.4286,local_loss: 0.03140685133795413,global_loss: 0.39715099334716797\n",
      "Step 41: BSNE_Loss=0.4250,local_loss: 0.036696240169407306,global_loss: 0.3883044421672821\n",
      "Step 42: BSNE_Loss=0.4256,local_loss: 0.03326978631732172,global_loss: 0.39236947894096375\n",
      "Step 43: BSNE_Loss=0.4931,local_loss: 0.0358315707120449,global_loss: 0.45728474855422974\n",
      "Step 44: BSNE_Loss=0.3899,local_loss: 0.034998776199066134,global_loss: 0.35490626096725464\n",
      "Step 45: BSNE_Loss=0.4329,local_loss: 0.031685115621639975,global_loss: 0.4011739194393158\n",
      "Step 46: BSNE_Loss=0.4855,local_loss: 0.030734326708014743,global_loss: 0.45476484298706055\n",
      "Step 47: BSNE_Loss=0.4179,local_loss: 0.03261309935613915,global_loss: 0.3853219449520111\n",
      "Step 48: BSNE_Loss=0.6071,local_loss: 0.03167753086960924,global_loss: 0.5753735899925232\n",
      "Step 49: BSNE_Loss=0.4919,local_loss: 0.03288544832998924,global_loss: 0.45900195837020874\n",
      "Step 50: BSNE_Loss=0.6132,local_loss: 0.03170771358773441,global_loss: 0.5814993977546692\n",
      "Step 51: BSNE_Loss=0.3659,local_loss: 0.03330375420418827,global_loss: 0.332606703042984\n",
      "Step 52: BSNE_Loss=0.3619,local_loss: 0.0357356843792084,global_loss: 0.32615578174591064\n",
      "Step 53: BSNE_Loss=0.4448,local_loss: 0.033959387467980044,global_loss: 0.41079992055892944\n",
      "Step 54: BSNE_Loss=0.4891,local_loss: 0.03514033360608723,global_loss: 0.45395588874816895\n",
      "Step 55: BSNE_Loss=0.4401,local_loss: 0.03787346663495992,global_loss: 0.40226247906684875\n",
      "Step 56: BSNE_Loss=0.4572,local_loss: 0.03447420839821691,global_loss: 0.42272862792015076\n",
      "Step 57: BSNE_Loss=0.3413,local_loss: 0.03635984310287191,global_loss: 0.30490541458129883\n",
      "Step 58: BSNE_Loss=0.3134,local_loss: 0.03321784550876325,global_loss: 0.28017789125442505\n",
      "Step 59: BSNE_Loss=0.4610,local_loss: 0.038816582644772196,global_loss: 0.42217814922332764\n",
      "Step 60: BSNE_Loss=0.4179,local_loss: 0.03961686745338225,global_loss: 0.378317654132843\n",
      "Step 61: BSNE_Loss=0.3290,local_loss: 0.03585139255518678,global_loss: 0.2931804060935974\n",
      "Step 62: BSNE_Loss=0.4408,local_loss: 0.037317599042348895,global_loss: 0.40348243713378906\n",
      "Step 63: BSNE_Loss=0.3986,local_loss: 0.03518546138576937,global_loss: 0.3634496033191681\n",
      "Step 64: BSNE_Loss=0.4443,local_loss: 0.034641297711918366,global_loss: 0.40965279936790466\n",
      "Step 65: BSNE_Loss=0.4321,local_loss: 0.03804975599478417,global_loss: 0.39406540989875793\n",
      "Step 66: BSNE_Loss=0.4692,local_loss: 0.03986857992546449,global_loss: 0.42933717370033264\n",
      "Step 67: BSNE_Loss=0.3311,local_loss: 0.04020945686132429,global_loss: 0.2908437252044678\n",
      "Step 68: BSNE_Loss=0.4959,local_loss: 0.04034130807414128,global_loss: 0.45552772283554077\n",
      "Step 69: BSNE_Loss=0.4340,local_loss: 0.04496394692794868,global_loss: 0.38907673954963684\n",
      "Step 70: BSNE_Loss=0.4289,local_loss: 0.04413807481825562,global_loss: 0.38477087020874023\n",
      "Step 71: BSNE_Loss=0.3701,local_loss: 0.03827336587813397,global_loss: 0.3318551480770111\n",
      "Step 72: BSNE_Loss=0.4056,local_loss: 0.04612542670789583,global_loss: 0.35950997471809387\n",
      "Step 73: BSNE_Loss=0.5062,local_loss: 0.03937688664787504,global_loss: 0.466804563999176\n",
      "Step 74: BSNE_Loss=0.4169,local_loss: 0.030747914806119277,global_loss: 0.38610512018203735\n",
      "Step 75: BSNE_Loss=0.4453,local_loss: 0.03812956245938538,global_loss: 0.40713804960250854\n",
      "Step 76: BSNE_Loss=0.3520,local_loss: 0.040636083207249235,global_loss: 0.31137657165527344\n",
      "Step 77: BSNE_Loss=0.3820,local_loss: 0.04278028961771575,global_loss: 0.33918997645378113\n",
      "Step 78: BSNE_Loss=0.4615,local_loss: 0.04768014087724132,global_loss: 0.4138163626194\n",
      "Step 79: BSNE_Loss=0.4279,local_loss: 0.051007950014074536,global_loss: 0.3768765330314636\n",
      "Step 80: BSNE_Loss=0.3231,local_loss: 0.051822082632441244,global_loss: 0.27128201723098755\n",
      "Step 81: BSNE_Loss=0.4263,local_loss: 0.04957606719543758,global_loss: 0.3767503499984741\n",
      "Step 82: BSNE_Loss=0.4156,local_loss: 0.047464095124909206,global_loss: 0.3681769073009491\n",
      "Step 83: BSNE_Loss=0.4828,local_loss: 0.04780655504026668,global_loss: 0.4349609315395355\n",
      "Step 84: BSNE_Loss=0.2685,local_loss: 0.03871331236166551,global_loss: 0.22978855669498444\n",
      "Step 85: BSNE_Loss=0.3103,local_loss: 0.04984564909880508,global_loss: 0.260449081659317\n",
      "Step 86: BSNE_Loss=0.2314,local_loss: 0.04170039885208661,global_loss: 0.18969979882240295\n",
      "Step 87: BSNE_Loss=0.3079,local_loss: 0.051394139079781256,global_loss: 0.256483793258667\n",
      "Step 88: BSNE_Loss=0.4615,local_loss: 0.04286787151558005,global_loss: 0.4186778664588928\n",
      "Step 89: BSNE_Loss=0.2179,local_loss: 0.042933017169542245,global_loss: 0.17496372759342194\n",
      "Step 90: BSNE_Loss=0.4048,local_loss: 0.06148197212667004,global_loss: 0.34327587485313416\n",
      "Step 91: BSNE_Loss=0.4016,local_loss: 0.05434317910778341,global_loss: 0.34728914499282837\n",
      "Step 92: BSNE_Loss=0.5606,local_loss: 0.05515290397585634,global_loss: 0.5054361820220947\n",
      "Step 93: BSNE_Loss=0.3411,local_loss: 0.05094845188377962,global_loss: 0.29015007615089417\n",
      "Step 94: BSNE_Loss=0.4350,local_loss: 0.06350699790925188,global_loss: 0.3715131878852844\n",
      "Step 95: BSNE_Loss=0.6203,local_loss: 0.06587868272378095,global_loss: 0.5544208884239197\n",
      "Step 96: BSNE_Loss=0.2007,local_loss: 0.06326077118221105,global_loss: 0.1374795138835907\n",
      "Step 97: BSNE_Loss=0.4740,local_loss: 0.07438068802597514,global_loss: 0.39958226680755615\n",
      "Step 98: BSNE_Loss=0.4415,local_loss: 0.061129421705414744,global_loss: 0.38039106130599976\n",
      "Step 99: BSNE_Loss=0.4299,local_loss: 0.049831244690210885,global_loss: 0.38006529211997986\n",
      "Step 100: BSNE_Loss=0.2500,local_loss: 0.06108955870322072,global_loss: 0.18893101811408997\n",
      "Step 101: BSNE_Loss=0.3628,local_loss: 0.06696801164898517,global_loss: 0.29585519433021545\n",
      "Step 102: BSNE_Loss=0.3796,local_loss: 0.06402402082259762,global_loss: 0.3155446946620941\n",
      "Step 103: BSNE_Loss=0.4296,local_loss: 0.07196416684730743,global_loss: 0.35758787393569946\n",
      "Step 104: BSNE_Loss=0.2799,local_loss: 0.07527545992364836,global_loss: 0.20460374653339386\n",
      "Step 105: BSNE_Loss=0.4021,local_loss: 0.07212844131786095,global_loss: 0.3299623429775238\n",
      "Step 106: BSNE_Loss=0.0435,local_loss: 0.07117053440970822,global_loss: -0.027649544179439545\n",
      "Center node 9729 finished at step 107 with loss 0.0435\n",
      "\n",
      "=== Pretraining Epoch 11 (Center Node: 52) ===\n",
      "Step 0: BSNE_Loss=0.1682,local_loss: 0.034197239556864024,global_loss: 0.13396412134170532\n",
      "Step 1: BSNE_Loss=0.0155,local_loss: 0.0498221167307441,global_loss: -0.034348778426647186\n",
      "Center node 52 finished at step 2 with loss 0.0155\n",
      "\n",
      "=== Pretraining Epoch 12 (Center Node: 4174) ===\n",
      "Step 0: BSNE_Loss=0.6580,local_loss: 0.04028841270331554,global_loss: 0.6176996231079102\n",
      "Step 1: BSNE_Loss=0.6832,local_loss: 0.04413964204354768,global_loss: 0.6390112042427063\n",
      "Step 2: BSNE_Loss=0.7267,local_loss: 0.03881817748715566,global_loss: 0.6878542900085449\n",
      "Step 3: BSNE_Loss=0.8155,local_loss: 0.03946922431675911,global_loss: 0.7760199904441833\n",
      "Step 4: BSNE_Loss=0.5144,local_loss: 0.035068634294520055,global_loss: 0.4793318510055542\n",
      "Step 5: BSNE_Loss=0.6790,local_loss: 0.03373279450090165,global_loss: 0.6452860236167908\n",
      "Step 6: BSNE_Loss=0.6831,local_loss: 0.03563475457344614,global_loss: 0.647449791431427\n",
      "Step 7: BSNE_Loss=0.7655,local_loss: 0.029037906412197344,global_loss: 0.7364486455917358\n",
      "Step 8: BSNE_Loss=0.6169,local_loss: 0.032144083165061337,global_loss: 0.5847702622413635\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 9: BSNE_Loss=0.6805,local_loss: 0.01998292558631059,global_loss: 0.660529613494873\n",
      "Step 10: BSNE_Loss=0.5493,local_loss: 0.025932562058288068,global_loss: 0.5233428478240967\n",
      "Step 11: BSNE_Loss=0.5550,local_loss: 0.027816005126870828,global_loss: 0.5271945595741272\n",
      "Step 12: BSNE_Loss=0.5239,local_loss: 0.022160201285089054,global_loss: 0.5017229914665222\n",
      "Step 13: BSNE_Loss=0.5143,local_loss: 0.017869225115057664,global_loss: 0.4964326322078705\n",
      "Step 14: BSNE_Loss=0.6149,local_loss: 0.014976248078947638,global_loss: 0.5999349355697632\n",
      "Step 15: BSNE_Loss=0.4676,local_loss: 0.018839298971834784,global_loss: 0.4487212896347046\n",
      "Step 16: BSNE_Loss=0.4594,local_loss: 0.019469492748887138,global_loss: 0.4399685859680176\n",
      "Step 17: BSNE_Loss=0.5696,local_loss: 0.013147799235989133,global_loss: 0.5564031600952148\n",
      "Step 18: BSNE_Loss=0.4882,local_loss: 0.018669246300315523,global_loss: 0.46950775384902954\n",
      "Step 19: BSNE_Loss=0.5053,local_loss: 0.016552883427672242,global_loss: 0.4887895882129669\n",
      "Step 20: BSNE_Loss=0.5426,local_loss: 0.013304984274165026,global_loss: 0.5292561650276184\n",
      "Step 21: BSNE_Loss=0.4465,local_loss: 0.020655818981978137,global_loss: 0.42589083313941956\n",
      "Step 22: BSNE_Loss=0.5068,local_loss: 0.015565494601500647,global_loss: 0.4912067651748657\n",
      "Step 23: BSNE_Loss=0.5010,local_loss: 0.016006195041228615,global_loss: 0.48495715856552124\n",
      "Step 24: BSNE_Loss=0.4911,local_loss: 0.016375491818952988,global_loss: 0.4747304916381836\n",
      "Step 25: BSNE_Loss=0.4757,local_loss: 0.018743979663657415,global_loss: 0.4569575786590576\n",
      "Step 26: BSNE_Loss=0.4544,local_loss: 0.018945069291885074,global_loss: 0.43543311953544617\n",
      "Step 27: BSNE_Loss=0.4900,local_loss: 0.01604578219842459,global_loss: 0.47396451234817505\n",
      "Step 28: BSNE_Loss=0.4913,local_loss: 0.016071716706001108,global_loss: 0.4752000868320465\n",
      "Step 29: BSNE_Loss=0.3956,local_loss: 0.024799252038586003,global_loss: 0.3707917332649231\n",
      "Step 30: BSNE_Loss=0.4049,local_loss: 0.024891437283163374,global_loss: 0.37996330857276917\n",
      "Step 31: BSNE_Loss=0.5234,local_loss: 0.013134428772563048,global_loss: 0.5103009939193726\n",
      "Step 32: BSNE_Loss=0.4935,local_loss: 0.01663535675511855,global_loss: 0.4768567681312561\n",
      "Step 33: BSNE_Loss=0.4136,local_loss: 0.022681605677551057,global_loss: 0.3909449279308319\n",
      "Step 34: BSNE_Loss=0.4880,local_loss: 0.016051823690424724,global_loss: 0.4719636142253876\n",
      "Step 35: BSNE_Loss=0.5147,local_loss: 0.013043548770573829,global_loss: 0.5016966462135315\n",
      "Step 36: BSNE_Loss=0.5201,local_loss: 0.014017711049870575,global_loss: 0.5060619711875916\n",
      "Step 37: BSNE_Loss=0.5149,local_loss: 0.012117256247320858,global_loss: 0.5028247833251953\n",
      "Step 38: BSNE_Loss=0.5260,local_loss: 0.012708426295622608,global_loss: 0.5133010149002075\n",
      "Step 39: BSNE_Loss=0.4610,local_loss: 0.01820504861533343,global_loss: 0.4427550137042999\n",
      "Step 40: BSNE_Loss=0.4101,local_loss: 0.02500481468447151,global_loss: 0.3850896656513214\n",
      "Step 41: BSNE_Loss=0.5450,local_loss: 0.01160390466624453,global_loss: 0.5334104299545288\n",
      "Step 42: BSNE_Loss=0.4249,local_loss: 0.022433983092604394,global_loss: 0.4024319648742676\n",
      "Step 43: BSNE_Loss=0.5010,local_loss: 0.01543075079223953,global_loss: 0.48557060956954956\n",
      "Step 44: BSNE_Loss=0.5157,local_loss: 0.015112215393145273,global_loss: 0.5005976557731628\n",
      "Step 45: BSNE_Loss=0.4423,local_loss: 0.019041771329610158,global_loss: 0.42328882217407227\n",
      "Step 46: BSNE_Loss=0.5402,local_loss: 0.013358907724097946,global_loss: 0.5267928838729858\n",
      "Step 47: BSNE_Loss=0.5191,local_loss: 0.020112642787425365,global_loss: 0.49903497099876404\n",
      "Step 48: BSNE_Loss=0.4912,local_loss: 0.01385212754759158,global_loss: 0.4773237109184265\n",
      "Step 49: BSNE_Loss=0.4748,local_loss: 0.019173744971011868,global_loss: 0.4556528329849243\n",
      "Step 50: BSNE_Loss=0.5379,local_loss: 0.014715756736954315,global_loss: 0.5231658220291138\n",
      "Step 51: BSNE_Loss=0.3611,local_loss: 0.031270806792912595,global_loss: 0.3298758566379547\n",
      "Step 52: BSNE_Loss=0.4445,local_loss: 0.022515637452925237,global_loss: 0.4220291078090668\n",
      "Step 53: BSNE_Loss=0.4659,local_loss: 0.02390865189238449,global_loss: 0.4419543743133545\n",
      "Step 54: BSNE_Loss=0.4730,local_loss: 0.02120221500047788,global_loss: 0.45179808139801025\n",
      "Step 55: BSNE_Loss=0.4465,local_loss: 0.024251341387783943,global_loss: 0.42226442694664\n",
      "Step 56: BSNE_Loss=0.4101,local_loss: 0.020503644490720962,global_loss: 0.3896075189113617\n",
      "Step 57: BSNE_Loss=0.4629,local_loss: 0.01913308364561448,global_loss: 0.44381141662597656\n",
      "Step 58: BSNE_Loss=0.5290,local_loss: 0.016357325026173236,global_loss: 0.5126447677612305\n",
      "Step 59: BSNE_Loss=0.3744,local_loss: 0.022626502960795304,global_loss: 0.35180142521858215\n",
      "Step 60: BSNE_Loss=0.3936,local_loss: 0.03351630149911185,global_loss: 0.360100656747818\n",
      "Step 61: BSNE_Loss=0.4971,local_loss: 0.020337975125848815,global_loss: 0.4767826795578003\n",
      "Step 62: BSNE_Loss=0.4916,local_loss: 0.01585412076080337,global_loss: 0.4757702946662903\n",
      "Step 63: BSNE_Loss=0.3720,local_loss: 0.028256438797149255,global_loss: 0.34370338916778564\n",
      "Step 64: BSNE_Loss=0.4671,local_loss: 0.019114218983874682,global_loss: 0.4479662775993347\n",
      "Step 65: BSNE_Loss=0.4459,local_loss: 0.025090843324091373,global_loss: 0.42085200548171997\n",
      "Step 66: BSNE_Loss=0.4696,local_loss: 0.027380997059168806,global_loss: 0.4421898126602173\n",
      "Step 67: BSNE_Loss=0.4658,local_loss: 0.025479453809965535,global_loss: 0.4403134882450104\n",
      "Step 68: BSNE_Loss=0.4033,local_loss: 0.03297927509816249,global_loss: 0.3702835440635681\n",
      "Step 69: BSNE_Loss=0.3802,local_loss: 0.026466308997190317,global_loss: 0.3537508249282837\n",
      "Step 70: BSNE_Loss=0.4508,local_loss: 0.02298434970036784,global_loss: 0.4277825951576233\n",
      "Step 71: BSNE_Loss=0.3786,local_loss: 0.03051657221671862,global_loss: 0.34805259108543396\n",
      "Step 72: BSNE_Loss=0.3593,local_loss: 0.031399800176580756,global_loss: 0.3278605341911316\n",
      "Step 73: BSNE_Loss=0.4182,local_loss: 0.0309773687500801,global_loss: 0.3871769309043884\n",
      "Step 74: BSNE_Loss=0.4438,local_loss: 0.029128711658220282,global_loss: 0.4146844744682312\n",
      "Step 75: BSNE_Loss=0.4034,local_loss: 0.026589999383873075,global_loss: 0.37677350640296936\n",
      "Step 76: BSNE_Loss=0.4161,local_loss: 0.02827337891161223,global_loss: 0.3878190815448761\n",
      "Step 77: BSNE_Loss=0.4814,local_loss: 0.03089198512861299,global_loss: 0.45050108432769775\n",
      "Step 78: BSNE_Loss=0.4979,local_loss: 0.02581570843948959,global_loss: 0.4720613360404968\n",
      "Step 79: BSNE_Loss=0.3628,local_loss: 0.03131238667819067,global_loss: 0.33145397901535034\n",
      "Step 80: BSNE_Loss=0.4552,local_loss: 0.025638971861727197,global_loss: 0.42952919006347656\n",
      "Step 81: BSNE_Loss=0.5318,local_loss: 0.02588165466009804,global_loss: 0.5059407949447632\n",
      "Step 82: BSNE_Loss=0.5520,local_loss: 0.02336317846964253,global_loss: 0.5286639928817749\n",
      "Step 83: BSNE_Loss=0.4295,local_loss: 0.025243191108412926,global_loss: 0.4042223393917084\n",
      "Step 84: BSNE_Loss=0.2918,local_loss: 0.033781194399286396,global_loss: 0.25803712010383606\n",
      "Step 85: BSNE_Loss=0.4053,local_loss: 0.02578606184400841,global_loss: 0.37954890727996826\n",
      "Step 86: BSNE_Loss=0.2743,local_loss: 0.0399368699617512,global_loss: 0.2344035655260086\n",
      "Step 87: BSNE_Loss=0.3471,local_loss: 0.04469033351336109,global_loss: 0.30245891213417053\n",
      "Step 88: BSNE_Loss=0.3248,local_loss: 0.045952343572908415,global_loss: 0.2788848578929901\n",
      "Step 89: BSNE_Loss=0.3317,local_loss: 0.03964709210182521,global_loss: 0.2920091450214386\n",
      "Step 90: BSNE_Loss=0.3729,local_loss: 0.042990277348997886,global_loss: 0.3299380838871002\n",
      "Step 91: BSNE_Loss=0.4152,local_loss: 0.02954539874633761,global_loss: 0.3856452703475952\n",
      "Step 92: BSNE_Loss=0.4867,local_loss: 0.041354227732151784,global_loss: 0.4453580975532532\n",
      "Step 93: BSNE_Loss=0.3402,local_loss: 0.03488750134625943,global_loss: 0.3052997291088104\n",
      "Step 94: BSNE_Loss=0.4378,local_loss: 0.03411221855484561,global_loss: 0.40367361903190613\n",
      "Step 95: BSNE_Loss=0.4801,local_loss: 0.029994533326272076,global_loss: 0.4501001834869385\n",
      "Step 96: BSNE_Loss=0.4401,local_loss: 0.02331144420640553,global_loss: 0.4168321192264557\n",
      "Step 97: BSNE_Loss=0.4419,local_loss: 0.03923189857448038,global_loss: 0.4026965796947479\n",
      "Step 98: BSNE_Loss=0.4195,local_loss: 0.03595939376783788,global_loss: 0.38356634974479675\n",
      "Step 99: BSNE_Loss=0.2923,local_loss: 0.03501000936287235,global_loss: 0.257269948720932\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 100: BSNE_Loss=0.3261,local_loss: 0.04168236450587335,global_loss: 0.2844517230987549\n",
      "Step 101: BSNE_Loss=0.3501,local_loss: 0.04168796008896761,global_loss: 0.30846068263053894\n",
      "Step 102: BSNE_Loss=0.2982,local_loss: 0.03760976468719096,global_loss: 0.26054978370666504\n",
      "Step 103: BSNE_Loss=0.3157,local_loss: 0.043264504939969375,global_loss: 0.2724658250808716\n",
      "Step 104: BSNE_Loss=0.4157,local_loss: 0.03584671321584121,global_loss: 0.37989333271980286\n",
      "Step 105: BSNE_Loss=0.2112,local_loss: 0.044941628199664845,global_loss: 0.166302889585495\n",
      "Step 106: BSNE_Loss=0.3842,local_loss: 0.0569737900181166,global_loss: 0.3272075653076172\n",
      "Step 107: BSNE_Loss=0.3678,local_loss: 0.04819370794301133,global_loss: 0.3196461498737335\n",
      "Step 108: BSNE_Loss=0.4331,local_loss: 0.05325452629199294,global_loss: 0.3798653781414032\n",
      "Step 109: BSNE_Loss=0.1948,local_loss: 0.04924747669923955,global_loss: 0.14553630352020264\n",
      "Step 110: BSNE_Loss=0.3003,local_loss: 0.0458933820183638,global_loss: 0.25440606474876404\n",
      "Step 111: BSNE_Loss=0.4387,local_loss: 0.04173322485560858,global_loss: 0.39695072174072266\n",
      "Step 112: BSNE_Loss=0.3248,local_loss: 0.04996518092546887,global_loss: 0.27478575706481934\n",
      "Step 113: BSNE_Loss=0.3685,local_loss: 0.04951277250301193,global_loss: 0.31901103258132935\n",
      "Step 114: BSNE_Loss=0.2363,local_loss: 0.04838955100223927,global_loss: 0.18794317543506622\n",
      "Step 115: BSNE_Loss=0.2894,local_loss: 0.04452039181181812,global_loss: 0.24484357237815857\n",
      "Step 116: BSNE_Loss=0.3968,local_loss: 0.05355488041746627,global_loss: 0.34327757358551025\n",
      "Step 117: BSNE_Loss=0.4107,local_loss: 0.03829555996176297,global_loss: 0.3724113404750824\n",
      "Step 118: BSNE_Loss=0.3325,local_loss: 0.05803324451838314,global_loss: 0.2744433581829071\n",
      "Step 119: BSNE_Loss=0.3640,local_loss: 0.05340061516086033,global_loss: 0.3105601370334625\n",
      "Step 120: BSNE_Loss=0.2911,local_loss: 0.058140191553852534,global_loss: 0.23293103277683258\n",
      "Step 121: BSNE_Loss=0.3356,local_loss: 0.05225350313656579,global_loss: 0.2833788990974426\n",
      "Step 122: BSNE_Loss=0.3511,local_loss: 0.05417262538097427,global_loss: 0.29689228534698486\n",
      "Step 123: BSNE_Loss=0.1041,local_loss: 0.06429279034398358,global_loss: 0.03979936242103577\n",
      "Step 124: BSNE_Loss=0.4688,local_loss: 0.04706108993988957,global_loss: 0.421703964471817\n",
      "Step 125: BSNE_Loss=0.3738,local_loss: 0.04468002009930245,global_loss: 0.3291446268558502\n",
      "Step 126: BSNE_Loss=0.1642,local_loss: 0.05413574169530889,global_loss: 0.1101045235991478\n",
      "Step 127: BSNE_Loss=0.2954,local_loss: 0.05890147001335165,global_loss: 0.23653876781463623\n",
      "Step 128: BSNE_Loss=-0.0178,local_loss: 0.0768006542297061,global_loss: -0.09457327425479889\n",
      "Center node 4174 finished at step 129 with loss -0.0178\n",
      "\n",
      "=== Pretraining Epoch 13 (Center Node: 10609) ===\n",
      "Step 0: BSNE_Loss=0.2970,local_loss: 0.06053762765481259,global_loss: 0.23644861578941345\n",
      "Step 1: BSNE_Loss=0.1052,local_loss: 0.07636415688553366,global_loss: 0.02883596532046795\n",
      "Step 2: BSNE_Loss=-0.0395,local_loss: 0.02539230200302343,global_loss: -0.06491128355264664\n",
      "Center node 10609 finished at step 3 with loss -0.0395\n",
      "\n",
      "=== Pretraining Epoch 14 (Center Node: 505) ===\n",
      "Step 0: BSNE_Loss=0.1214,local_loss: 0.036900339915373476,global_loss: 0.08451835811138153\n",
      "Step 1: BSNE_Loss=0.1667,local_loss: 0.03234365777554678,global_loss: 0.13432981073856354\n",
      "Step 2: BSNE_Loss=0.1620,local_loss: 0.039137617633203434,global_loss: 0.12288329005241394\n",
      "Step 3: BSNE_Loss=0.2485,local_loss: 0.06081367183782482,global_loss: 0.18765033781528473\n",
      "Step 4: BSNE_Loss=0.2579,local_loss: 0.0402234553181909,global_loss: 0.21763859689235687\n",
      "Step 5: BSNE_Loss=0.1694,local_loss: 0.04526715967567078,global_loss: 0.12408915162086487\n",
      "Step 6: BSNE_Loss=0.4040,local_loss: 0.03429490760431193,global_loss: 0.3697489798069\n",
      "Step 7: BSNE_Loss=0.4050,local_loss: 0.047420408632115926,global_loss: 0.3576256334781647\n",
      "Step 8: BSNE_Loss=0.3124,local_loss: 0.03825838583132197,global_loss: 0.27410146594047546\n",
      "Step 9: BSNE_Loss=0.0970,local_loss: 0.039946007631821004,global_loss: 0.057016484439373016\n",
      "Step 10: BSNE_Loss=0.3360,local_loss: 0.03830567508737376,global_loss: 0.2976747453212738\n",
      "Step 11: BSNE_Loss=0.1313,local_loss: 0.03592858278979978,global_loss: 0.09534639865159988\n",
      "Step 12: BSNE_Loss=0.1551,local_loss: 0.044588669702205395,global_loss: 0.11047296226024628\n",
      "Step 13: BSNE_Loss=0.2333,local_loss: 0.027755772962868335,global_loss: 0.20554527640342712\n",
      "Step 14: BSNE_Loss=0.1697,local_loss: 0.0283899685705179,global_loss: 0.1413530856370926\n",
      "Step 15: BSNE_Loss=0.3790,local_loss: 0.053730434476037155,global_loss: 0.32531601190567017\n",
      "Step 16: BSNE_Loss=-0.1913,local_loss: 0.036754815777039485,global_loss: -0.22809603810310364\n",
      "Center node 505 finished at step 17 with loss -0.1913\n",
      "\n",
      "=== Pretraining Epoch 15 (Center Node: 1893) ===\n",
      "Step 0: BSNE_Loss=0.0107,local_loss: 0.0905538265553347,global_loss: -0.07988470792770386\n",
      "Center node 1893 finished at step 1 with loss 0.0107\n",
      "\n",
      "=== Pretraining Epoch 16 (Center Node: 4922) ===\n",
      "Step 0: BSNE_Loss=0.8244,local_loss: 0.03754854562773491,global_loss: 0.7868189811706543\n",
      "Step 1: BSNE_Loss=0.6451,local_loss: 0.03933636231770912,global_loss: 0.605726957321167\n",
      "Step 2: BSNE_Loss=0.6414,local_loss: 0.03630707404113471,global_loss: 0.6051017045974731\n",
      "Step 3: BSNE_Loss=0.5644,local_loss: 0.03177954255853821,global_loss: 0.532651960849762\n",
      "Step 4: BSNE_Loss=0.5140,local_loss: 0.03298486663087531,global_loss: 0.4810548424720764\n",
      "Step 5: BSNE_Loss=0.5289,local_loss: 0.03132205432752072,global_loss: 0.4976078271865845\n",
      "Step 6: BSNE_Loss=0.5714,local_loss: 0.03194845297154596,global_loss: 0.5394497513771057\n",
      "Step 7: BSNE_Loss=0.4520,local_loss: 0.030877832846060174,global_loss: 0.4210846424102783\n",
      "Step 8: BSNE_Loss=0.7051,local_loss: 0.012565085788289403,global_loss: 0.6925235390663147\n",
      "Step 9: BSNE_Loss=0.4679,local_loss: 0.01229184262750047,global_loss: 0.45563581585884094\n",
      "Step 10: BSNE_Loss=0.4673,local_loss: 0.01362877512672047,global_loss: 0.4536339342594147\n",
      "Step 11: BSNE_Loss=0.5261,local_loss: 0.010518215732294152,global_loss: 0.515532910823822\n",
      "Step 12: BSNE_Loss=0.4812,local_loss: 0.012586825909184349,global_loss: 0.46858149766921997\n",
      "Step 13: BSNE_Loss=0.4538,local_loss: 0.010350826894708365,global_loss: 0.4434821903705597\n",
      "Step 14: BSNE_Loss=0.4195,local_loss: 0.010479651189746884,global_loss: 0.409066766500473\n",
      "Step 15: BSNE_Loss=0.4299,local_loss: 0.013332587435795355,global_loss: 0.41657114028930664\n",
      "Step 16: BSNE_Loss=0.3712,local_loss: 0.017517695417786656,global_loss: 0.35371172428131104\n",
      "Step 17: BSNE_Loss=0.4581,local_loss: 0.011148988552688165,global_loss: 0.44694218039512634\n",
      "Step 18: BSNE_Loss=0.4611,local_loss: 0.01624716784766638,global_loss: 0.44485342502593994\n",
      "Step 19: BSNE_Loss=0.3779,local_loss: 0.017261378944227742,global_loss: 0.3605945110321045\n",
      "Step 20: BSNE_Loss=0.3948,local_loss: 0.0165002800443697,global_loss: 0.378274530172348\n",
      "Step 21: BSNE_Loss=0.3915,local_loss: 0.017637688626014662,global_loss: 0.3738226294517517\n",
      "Step 22: BSNE_Loss=0.3713,local_loss: 0.015782544650260064,global_loss: 0.3555077910423279\n",
      "Step 23: BSNE_Loss=0.4503,local_loss: 0.010600244995024558,global_loss: 0.43968185782432556\n",
      "Step 24: BSNE_Loss=0.3928,local_loss: 0.010064373325576746,global_loss: 0.38276776671409607\n",
      "Step 25: BSNE_Loss=0.4435,local_loss: 0.012702587244713959,global_loss: 0.43082430958747864\n",
      "Step 26: BSNE_Loss=0.4220,local_loss: 0.012798233345861295,global_loss: 0.40923914313316345\n",
      "Step 27: BSNE_Loss=0.3663,local_loss: 0.017116665917814605,global_loss: 0.3491688072681427\n",
      "Step 28: BSNE_Loss=0.3817,local_loss: 0.01874133663004115,global_loss: 0.36293327808380127\n",
      "Step 29: BSNE_Loss=0.3748,local_loss: 0.01868974611976073,global_loss: 0.35614368319511414\n",
      "Step 30: BSNE_Loss=0.4380,local_loss: 0.014273525038766948,global_loss: 0.4237291216850281\n",
      "Step 31: BSNE_Loss=0.3612,local_loss: 0.016749045765657295,global_loss: 0.3444969356060028\n",
      "Step 32: BSNE_Loss=0.3613,local_loss: 0.019280202266335724,global_loss: 0.34203535318374634\n",
      "Step 33: BSNE_Loss=0.4299,local_loss: 0.021044264170697498,global_loss: 0.40886765718460083\n",
      "Step 34: BSNE_Loss=0.3656,local_loss: 0.021423137710370493,global_loss: 0.34417033195495605\n",
      "Step 35: BSNE_Loss=0.3722,local_loss: 0.01876121886939883,global_loss: 0.3534347414970398\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 36: BSNE_Loss=0.4246,local_loss: 0.011719804550390244,global_loss: 0.41284996271133423\n",
      "Step 37: BSNE_Loss=0.3659,local_loss: 0.022878565686212465,global_loss: 0.3429751992225647\n",
      "Step 38: BSNE_Loss=0.3673,local_loss: 0.022261424595970237,global_loss: 0.345000684261322\n",
      "Step 39: BSNE_Loss=0.4092,local_loss: 0.02250488040330791,global_loss: 0.38667845726013184\n",
      "Step 40: BSNE_Loss=0.3440,local_loss: 0.021423916788448082,global_loss: 0.3225661814212799\n",
      "Step 41: BSNE_Loss=0.2966,local_loss: 0.02880200214087359,global_loss: 0.26778537034988403\n",
      "Step 42: BSNE_Loss=0.2602,local_loss: 0.029751368802912938,global_loss: 0.2304118275642395\n",
      "Step 43: BSNE_Loss=0.2824,local_loss: 0.03365061234544926,global_loss: 0.24874170124530792\n",
      "Step 44: BSNE_Loss=0.3162,local_loss: 0.029510859260051807,global_loss: 0.2867189049720764\n",
      "Step 45: BSNE_Loss=0.2072,local_loss: 0.04212810547271259,global_loss: 0.16508758068084717\n",
      "Step 46: BSNE_Loss=0.2149,local_loss: 0.032530753783373355,global_loss: 0.1823200136423111\n",
      "Step 47: BSNE_Loss=0.2318,local_loss: 0.04278289975102099,global_loss: 0.1889704018831253\n",
      "Step 48: BSNE_Loss=0.2635,local_loss: 0.026584973727581347,global_loss: 0.23689308762550354\n",
      "Step 49: BSNE_Loss=0.2137,local_loss: 0.03800190735430269,global_loss: 0.17567269504070282\n",
      "Step 50: BSNE_Loss=0.3955,local_loss: 0.037711988798772834,global_loss: 0.35782870650291443\n",
      "Step 51: BSNE_Loss=0.1225,local_loss: 0.05029738824489154,global_loss: 0.07220914214849472\n",
      "Step 52: BSNE_Loss=0.2158,local_loss: 0.049931950411160365,global_loss: 0.16582106053829193\n",
      "Step 53: BSNE_Loss=0.3373,local_loss: 0.041784757849705145,global_loss: 0.2955184280872345\n",
      "Step 54: BSNE_Loss=0.2596,local_loss: 0.03837306445716058,global_loss: 0.22118450701236725\n",
      "Step 55: BSNE_Loss=0.0489,local_loss: 0.04917238197525169,global_loss: -0.0002456609217915684\n",
      "Center node 4922 finished at step 56 with loss 0.0489\n",
      "\n",
      "=== Pretraining Epoch 17 (Center Node: 5585) ===\n",
      "Step 0: BSNE_Loss=0.3036,local_loss: 0.05896395321809892,global_loss: 0.24459148943424225\n",
      "Step 1: BSNE_Loss=0.0611,local_loss: 0.05861314938619338,global_loss: 0.0024509173817932606\n",
      "Step 2: BSNE_Loss=0.2145,local_loss: 0.058844277619477164,global_loss: 0.15570247173309326\n",
      "Step 3: BSNE_Loss=0.3390,local_loss: 0.06054127497160433,global_loss: 0.278482049703598\n",
      "Step 4: BSNE_Loss=0.3254,local_loss: 0.06292213609646698,global_loss: 0.2624766528606415\n",
      "Step 5: BSNE_Loss=0.1575,local_loss: 0.040026982272102385,global_loss: 0.11748523265123367\n",
      "Step 6: BSNE_Loss=0.0633,local_loss: 0.06353337974473686,global_loss: -0.00023826815595384687\n",
      "Step 7: BSNE_Loss=0.0613,local_loss: 0.05084265877846187,global_loss: 0.010500185191631317\n",
      "Step 8: BSNE_Loss=0.2570,local_loss: 0.06927145526370988,global_loss: 0.18768876791000366\n",
      "Step 9: BSNE_Loss=0.0687,local_loss: 0.06252622048150405,global_loss: 0.006201429758220911\n",
      "Step 10: BSNE_Loss=0.1221,local_loss: 0.05257177550752681,global_loss: 0.06950207054615021\n",
      "Step 11: BSNE_Loss=0.0320,local_loss: 0.08487769224386177,global_loss: -0.052893999963998795\n",
      "Center node 5585 finished at step 12 with loss 0.0320\n",
      "\n",
      "=== Pretraining Epoch 18 (Center Node: 2063) ===\n",
      "Step 0: BSNE_Loss=0.1375,local_loss: 0.06261393567068196,global_loss: 0.07485957443714142\n",
      "Step 1: BSNE_Loss=0.2384,local_loss: 0.08159409014486647,global_loss: 0.15677230060100555\n",
      "Step 2: BSNE_Loss=0.2660,local_loss: 0.08571715649416467,global_loss: 0.1802525669336319\n",
      "Step 3: BSNE_Loss=-0.0695,local_loss: 0.07315729727523722,global_loss: -0.14270153641700745\n",
      "Center node 2063 finished at step 4 with loss -0.0695\n",
      "\n",
      "=== Pretraining Epoch 19 (Center Node: 5103) ===\n",
      "Step 0: BSNE_Loss=0.1333,local_loss: 0.07322729690792765,global_loss: 0.060031820088624954\n",
      "Step 1: BSNE_Loss=0.2248,local_loss: 0.06307582576224943,global_loss: 0.16173560917377472\n",
      "Step 2: BSNE_Loss=0.1435,local_loss: 0.0674006150421965,global_loss: 0.07610373198986053\n",
      "Step 3: BSNE_Loss=0.4029,local_loss: 0.06453978062418214,global_loss: 0.33833304047584534\n",
      "Step 4: BSNE_Loss=0.2003,local_loss: 0.09254230444986997,global_loss: 0.10776232928037643\n",
      "Step 5: BSNE_Loss=0.3457,local_loss: 0.07289414602268622,global_loss: 0.2727831304073334\n",
      "Step 6: BSNE_Loss=0.2236,local_loss: 0.07656129323121588,global_loss: 0.14708425104618073\n",
      "Step 7: BSNE_Loss=0.4832,local_loss: 0.07910574036694541,global_loss: 0.4040563404560089\n",
      "Step 8: BSNE_Loss=0.6249,local_loss: 0.05257848769269058,global_loss: 0.572292685508728\n",
      "Step 9: BSNE_Loss=0.1165,local_loss: 0.05900892581351684,global_loss: 0.05746791884303093\n",
      "Step 10: BSNE_Loss=0.1305,local_loss: 0.09111193102666498,global_loss: 0.03940073400735855\n",
      "Step 11: BSNE_Loss=0.3803,local_loss: 0.06549258657368714,global_loss: 0.3148365914821625\n",
      "Step 12: BSNE_Loss=0.1950,local_loss: 0.07623947694539994,global_loss: 0.11871921271085739\n",
      "Step 13: BSNE_Loss=0.4513,local_loss: 0.059201584698142545,global_loss: 0.3920949399471283\n",
      "Step 14: BSNE_Loss=0.0955,local_loss: 0.05901434913936244,global_loss: 0.03647705167531967\n",
      "Step 15: BSNE_Loss=0.2340,local_loss: 0.05947275168399068,global_loss: 0.17450737953186035\n",
      "Step 16: BSNE_Loss=0.1765,local_loss: 0.056240292529195615,global_loss: 0.12021639198064804\n",
      "Step 17: BSNE_Loss=0.2932,local_loss: 0.059915379265614424,global_loss: 0.23328614234924316\n",
      "Step 18: BSNE_Loss=0.2978,local_loss: 0.06860075306667937,global_loss: 0.22917599976062775\n",
      "Step 19: BSNE_Loss=0.3239,local_loss: 0.07466254590843611,global_loss: 0.24923452734947205\n",
      "Step 20: BSNE_Loss=0.3962,local_loss: 0.056309503232505366,global_loss: 0.33988267183303833\n",
      "Step 21: BSNE_Loss=0.3648,local_loss: 0.05520077674221344,global_loss: 0.3096184730529785\n",
      "Step 22: BSNE_Loss=0.2544,local_loss: 0.09168377236062547,global_loss: 0.1627344787120819\n",
      "Step 23: BSNE_Loss=0.4226,local_loss: 0.06587889936580585,global_loss: 0.35670608282089233\n",
      "Step 24: BSNE_Loss=0.3153,local_loss: 0.05342782726974396,global_loss: 0.26186269521713257\n",
      "Step 25: BSNE_Loss=0.2010,local_loss: 0.08324917836048536,global_loss: 0.11773079633712769\n",
      "Step 26: BSNE_Loss=0.3261,local_loss: 0.08518015007858296,global_loss: 0.24091258645057678\n",
      "Step 27: BSNE_Loss=0.3430,local_loss: 0.07609379769355806,global_loss: 0.2668895125389099\n",
      "Step 28: BSNE_Loss=0.1325,local_loss: 0.0771273344895588,global_loss: 0.05536932870745659\n",
      "Step 29: BSNE_Loss=0.3155,local_loss: 0.06266880788174523,global_loss: 0.2528268098831177\n",
      "Step 30: BSNE_Loss=0.3272,local_loss: 0.0675943792165604,global_loss: 0.2595709264278412\n",
      "Step 31: BSNE_Loss=0.2381,local_loss: 0.05545001302473855,global_loss: 0.1826259344816208\n",
      "Step 32: BSNE_Loss=0.2473,local_loss: 0.06736115178005977,global_loss: 0.17993685603141785\n",
      "Step 33: BSNE_Loss=0.1827,local_loss: 0.06736391440857106,global_loss: 0.11534438282251358\n",
      "Step 34: BSNE_Loss=0.4586,local_loss: 0.06828705856031317,global_loss: 0.390362024307251\n",
      "Step 35: BSNE_Loss=0.2197,local_loss: 0.0680934952438869,global_loss: 0.15165087580680847\n",
      "Step 36: BSNE_Loss=0.3161,local_loss: 0.07919257955734843,global_loss: 0.23686645925045013\n",
      "Step 37: BSNE_Loss=0.2019,local_loss: 0.10009925626435368,global_loss: 0.10176380723714828\n",
      "Step 38: BSNE_Loss=0.1834,local_loss: 0.060213004905224694,global_loss: 0.12315714359283447\n",
      "Step 39: BSNE_Loss=0.6304,local_loss: 0.04452067215289651,global_loss: 0.5858612060546875\n",
      "Step 40: BSNE_Loss=0.1667,local_loss: 0.06583854774017821,global_loss: 0.10083790868520737\n",
      "Step 41: BSNE_Loss=0.0221,local_loss: 0.07960895656047547,global_loss: -0.057464953511953354\n",
      "Center node 5103 finished at step 42 with loss 0.0221\n",
      "\n",
      "=== Pretraining Epoch 20 (Center Node: 9440) ===\n",
      "Step 0: BSNE_Loss=0.2985,local_loss: 0.04226398355400404,global_loss: 0.25620630383491516\n",
      "Step 1: BSNE_Loss=0.2887,local_loss: 0.03187360672129564,global_loss: 0.25678151845932007\n",
      "Step 2: BSNE_Loss=0.3785,local_loss: 0.03447569931954028,global_loss: 0.3439774215221405\n",
      "Step 3: BSNE_Loss=0.3759,local_loss: 0.04913814128034861,global_loss: 0.32679280638694763\n",
      "Step 4: BSNE_Loss=0.1615,local_loss: 0.025124501437939976,global_loss: 0.13641421496868134\n",
      "Step 5: BSNE_Loss=0.1809,local_loss: 0.03608030492834034,global_loss: 0.1448180079460144\n",
      "Step 6: BSNE_Loss=0.2980,local_loss: 0.05616146396939672,global_loss: 0.24179351329803467\n",
      "Step 7: BSNE_Loss=0.2180,local_loss: 0.02279288987175962,global_loss: 0.19525064527988434\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 8: BSNE_Loss=0.1738,local_loss: 0.03013079524242735,global_loss: 0.14362336695194244\n",
      "Step 9: BSNE_Loss=0.0594,local_loss: 0.049934403702722036,global_loss: 0.009415979497134686\n",
      "Step 10: BSNE_Loss=0.3495,local_loss: 0.0385052408326995,global_loss: 0.310994952917099\n",
      "Step 11: BSNE_Loss=0.1690,local_loss: 0.03614423913538726,global_loss: 0.13288357853889465\n",
      "Step 12: BSNE_Loss=0.0345,local_loss: 0.03799927777713848,global_loss: -0.0034767556935548782\n",
      "Center node 9440 finished at step 13 with loss 0.0345\n",
      "\n",
      "=== Pretraining Epoch 21 (Center Node: 6376) ===\n",
      "Step 0: BSNE_Loss=0.0236,local_loss: 0.04553837827426771,global_loss: -0.02196519263088703\n",
      "Center node 6376 finished at step 1 with loss 0.0236\n",
      "\n",
      "=== Pretraining Epoch 22 (Center Node: 2192) ===\n",
      "Step 0: BSNE_Loss=0.1584,local_loss: 0.03933600955805172,global_loss: 0.11904489994049072\n",
      "Step 1: BSNE_Loss=0.4246,local_loss: 0.04427687612410206,global_loss: 0.38036325573921204\n",
      "Step 2: BSNE_Loss=0.1646,local_loss: 0.03872604362987493,global_loss: 0.12584218382835388\n",
      "Step 3: BSNE_Loss=0.1165,local_loss: 0.0389955489082614,global_loss: 0.07754658162593842\n",
      "Step 4: BSNE_Loss=0.4787,local_loss: 0.03560868170344689,global_loss: 0.44304561614990234\n",
      "Step 5: BSNE_Loss=0.3419,local_loss: 0.03217730649095549,global_loss: 0.3096730411052704\n",
      "Step 6: BSNE_Loss=0.1470,local_loss: 0.036937305660766445,global_loss: 0.11002828180789948\n",
      "Step 7: BSNE_Loss=0.1844,local_loss: 0.03671874750184366,global_loss: 0.14766322076320648\n",
      "Step 8: BSNE_Loss=0.2431,local_loss: 0.05030725726180709,global_loss: 0.19281546771526337\n",
      "Step 9: BSNE_Loss=0.3680,local_loss: 0.04275246253823713,global_loss: 0.325202614068985\n",
      "Step 10: BSNE_Loss=0.3732,local_loss: 0.029354998351075155,global_loss: 0.3438519835472107\n",
      "Step 11: BSNE_Loss=0.3178,local_loss: 0.029323395968548605,global_loss: 0.28848394751548767\n",
      "Step 12: BSNE_Loss=-0.0257,local_loss: 0.05580533811246364,global_loss: -0.08147288858890533\n",
      "Center node 2192 finished at step 13 with loss -0.0257\n",
      "\n",
      "=== Pretraining Epoch 23 (Center Node: 1661) ===\n",
      "Step 0: BSNE_Loss=0.1084,local_loss: 0.05373943489148997,global_loss: 0.05468469858169556\n",
      "Step 1: BSNE_Loss=0.0973,local_loss: 0.03901225392246911,global_loss: 0.058309901505708694\n",
      "Step 2: BSNE_Loss=0.1389,local_loss: 0.06310252806413505,global_loss: 0.07576020807027817\n",
      "Step 3: BSNE_Loss=0.1307,local_loss: 0.04638398947972504,global_loss: 0.0843106359243393\n",
      "Step 4: BSNE_Loss=0.1645,local_loss: 0.04883979749716731,global_loss: 0.1157090812921524\n",
      "Step 5: BSNE_Loss=0.3222,local_loss: 0.029098203786063086,global_loss: 0.2930663228034973\n",
      "Step 6: BSNE_Loss=0.0795,local_loss: 0.028610298102012627,global_loss: 0.05087524279952049\n",
      "Step 7: BSNE_Loss=0.0168,local_loss: 0.07628636509412094,global_loss: -0.059521764516830444\n",
      "Center node 1661 finished at step 8 with loss 0.0168\n",
      "\n",
      "=== Pretraining Epoch 24 (Center Node: 441) ===\n",
      "Step 0: BSNE_Loss=0.3004,local_loss: 0.053125705094456355,global_loss: 0.24731165170669556\n",
      "Step 1: BSNE_Loss=0.2404,local_loss: 0.05943829866283787,global_loss: 0.18092170357704163\n",
      "Step 2: BSNE_Loss=0.2478,local_loss: 0.046382062771502315,global_loss: 0.20143739879131317\n",
      "Step 3: BSNE_Loss=0.0025,local_loss: 0.08158342797780632,global_loss: -0.07903427630662918\n",
      "Center node 441 finished at step 4 with loss 0.0025\n",
      "\n",
      "=== Pretraining Epoch 25 (Center Node: 1628) ===\n",
      "Step 0: BSNE_Loss=0.4711,local_loss: 0.05797885128419083,global_loss: 0.41307878494262695\n",
      "Step 1: BSNE_Loss=0.3440,local_loss: 0.05376544393865706,global_loss: 0.29021430015563965\n",
      "Step 2: BSNE_Loss=0.2637,local_loss: 0.07012047275596442,global_loss: 0.1935882419347763\n",
      "Step 3: BSNE_Loss=0.3061,local_loss: 0.05712535053811313,global_loss: 0.24898242950439453\n",
      "Step 4: BSNE_Loss=0.5219,local_loss: 0.06902207428856381,global_loss: 0.45289304852485657\n",
      "Step 5: BSNE_Loss=0.4442,local_loss: 0.06767859612512615,global_loss: 0.3765711486339569\n",
      "Step 6: BSNE_Loss=0.4410,local_loss: 0.060530357906351764,global_loss: 0.3804478645324707\n",
      "Step 7: BSNE_Loss=0.4198,local_loss: 0.0580057763732378,global_loss: 0.36177727580070496\n",
      "Step 8: BSNE_Loss=0.1062,local_loss: 0.060010673149742504,global_loss: 0.046189337968826294\n",
      "Step 9: BSNE_Loss=0.4960,local_loss: 0.05025211459917471,global_loss: 0.4457041025161743\n",
      "Step 10: BSNE_Loss=0.3476,local_loss: 0.06203165757795182,global_loss: 0.2855711281299591\n",
      "Step 11: BSNE_Loss=0.0934,local_loss: 0.049016144053184406,global_loss: 0.04434407502412796\n",
      "Step 12: BSNE_Loss=0.0415,local_loss: 0.05431363525978967,global_loss: -0.012828204780817032\n",
      "Center node 1628 finished at step 13 with loss 0.0415\n",
      "\n",
      "=== Pretraining Epoch 26 (Center Node: 3781) ===\n",
      "Step 0: BSNE_Loss=0.7917,local_loss: 0.028389635197947464,global_loss: 0.7632869482040405\n",
      "Step 1: BSNE_Loss=0.6963,local_loss: 0.03249786009727945,global_loss: 0.6637557744979858\n",
      "Step 2: BSNE_Loss=0.6870,local_loss: 0.028043443086841813,global_loss: 0.658925473690033\n",
      "Step 3: BSNE_Loss=0.7343,local_loss: 0.023647219769176255,global_loss: 0.7106283903121948\n",
      "Step 4: BSNE_Loss=0.7163,local_loss: 0.026561665184476353,global_loss: 0.6897094249725342\n",
      "Step 5: BSNE_Loss=0.5743,local_loss: 0.028216567888874375,global_loss: 0.5460988283157349\n",
      "Step 6: BSNE_Loss=0.5569,local_loss: 0.02376794012474692,global_loss: 0.5331239104270935\n",
      "Step 7: BSNE_Loss=0.6828,local_loss: 0.017723278139593193,global_loss: 0.6650664806365967\n",
      "Step 8: BSNE_Loss=0.6104,local_loss: 0.02356522970236819,global_loss: 0.5868293046951294\n",
      "Step 9: BSNE_Loss=0.4058,local_loss: 0.024707859816289847,global_loss: 0.3811129629611969\n",
      "Step 10: BSNE_Loss=0.5188,local_loss: 0.026097589440675736,global_loss: 0.49268752336502075\n",
      "Step 11: BSNE_Loss=0.4313,local_loss: 0.037462648655348985,global_loss: 0.3938433527946472\n",
      "Step 12: BSNE_Loss=0.3552,local_loss: 0.03862124174595809,global_loss: 0.31659451127052307\n",
      "Step 13: BSNE_Loss=0.4993,local_loss: 0.036512312941227254,global_loss: 0.4628000855445862\n",
      "Step 14: BSNE_Loss=0.2626,local_loss: 0.04324874175839413,global_loss: 0.21938589215278625\n",
      "Step 15: BSNE_Loss=0.4585,local_loss: 0.03656039696755757,global_loss: 0.42191082239151\n",
      "Step 16: BSNE_Loss=0.3193,local_loss: 0.0434649972935513,global_loss: 0.27586835622787476\n",
      "Step 17: BSNE_Loss=0.3290,local_loss: 0.04030715263485482,global_loss: 0.28867724537849426\n",
      "Step 18: BSNE_Loss=0.2961,local_loss: 0.021552159833173903,global_loss: 0.2745976448059082\n",
      "Step 19: BSNE_Loss=0.3392,local_loss: 0.04918352449760429,global_loss: 0.2900460958480835\n",
      "Step 20: BSNE_Loss=0.4493,local_loss: 0.043168246852788625,global_loss: 0.4061775803565979\n",
      "Step 21: BSNE_Loss=0.3560,local_loss: 0.04480729794104002,global_loss: 0.31121793389320374\n",
      "Step 22: BSNE_Loss=0.5076,local_loss: 0.03979216504204571,global_loss: 0.467845618724823\n",
      "Step 23: BSNE_Loss=0.3589,local_loss: 0.045503352939729895,global_loss: 0.31339672207832336\n",
      "Step 24: BSNE_Loss=0.4362,local_loss: 0.04972404692574421,global_loss: 0.3864653706550598\n",
      "Step 25: BSNE_Loss=0.3752,local_loss: 0.03748163949404949,global_loss: 0.337748646736145\n",
      "Step 26: BSNE_Loss=0.3503,local_loss: 0.050934837103303106,global_loss: 0.29936984181404114\n",
      "Step 27: BSNE_Loss=0.4110,local_loss: 0.048381126316141054,global_loss: 0.36258798837661743\n",
      "Step 28: BSNE_Loss=0.3490,local_loss: 0.033849279519598284,global_loss: 0.3151768147945404\n",
      "Step 29: BSNE_Loss=0.4378,local_loss: 0.04230734653836572,global_loss: 0.3954509198665619\n",
      "Step 30: BSNE_Loss=0.4150,local_loss: 0.04320785187001522,global_loss: 0.3717465102672577\n",
      "Step 31: BSNE_Loss=0.4045,local_loss: 0.0378252992791438,global_loss: 0.36670202016830444\n",
      "Step 32: BSNE_Loss=0.4140,local_loss: 0.05610506576559174,global_loss: 0.35790497064590454\n",
      "Step 33: BSNE_Loss=0.2888,local_loss: 0.05389726474305383,global_loss: 0.23495082557201385\n",
      "Step 34: BSNE_Loss=0.3540,local_loss: 0.05731236361761434,global_loss: 0.29668161273002625\n",
      "Step 35: BSNE_Loss=0.2056,local_loss: 0.055773216562155024,global_loss: 0.14979200065135956\n",
      "Step 36: BSNE_Loss=0.3887,local_loss: 0.0505948899016922,global_loss: 0.3380686938762665\n",
      "Step 37: BSNE_Loss=0.2532,local_loss: 0.06432815485691312,global_loss: 0.18891513347625732\n",
      "Step 38: BSNE_Loss=0.3604,local_loss: 0.06556465118084526,global_loss: 0.294802188873291\n",
      "Step 39: BSNE_Loss=0.3304,local_loss: 0.06582691689493954,global_loss: 0.2645602226257324\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 40: BSNE_Loss=0.4416,local_loss: 0.06190387988694605,global_loss: 0.37965312600135803\n",
      "Step 41: BSNE_Loss=0.1469,local_loss: 0.08176945820928236,global_loss: 0.06510994583368301\n",
      "Step 42: BSNE_Loss=0.3786,local_loss: 0.05627520577564016,global_loss: 0.32235953211784363\n",
      "Step 43: BSNE_Loss=0.4576,local_loss: 0.05893118600750234,global_loss: 0.3987117111682892\n",
      "Step 44: BSNE_Loss=0.2992,local_loss: 0.06082746564933875,global_loss: 0.23835688829421997\n",
      "Step 45: BSNE_Loss=0.3348,local_loss: 0.06084945887580981,global_loss: 0.27391159534454346\n",
      "Step 46: BSNE_Loss=0.0401,local_loss: 0.0770499737023223,global_loss: -0.03696689009666443\n",
      "Center node 3781 finished at step 47 with loss 0.0401\n",
      "\n",
      "=== Pretraining Epoch 27 (Center Node: 5741) ===\n",
      "Step 0: BSNE_Loss=0.7644,local_loss: 0.019657116899587705,global_loss: 0.7447196245193481\n",
      "Step 1: BSNE_Loss=0.6934,local_loss: 0.031808280408057193,global_loss: 0.6615801453590393\n",
      "Step 2: BSNE_Loss=0.7426,local_loss: 0.030706266001819717,global_loss: 0.7119218707084656\n",
      "Step 3: BSNE_Loss=0.9020,local_loss: 0.027018671795215842,global_loss: 0.8750247955322266\n",
      "Step 4: BSNE_Loss=0.6096,local_loss: 0.013889673029675528,global_loss: 0.5957542061805725\n",
      "Step 5: BSNE_Loss=0.4775,local_loss: 0.024425827920777715,global_loss: 0.45304644107818604\n",
      "Step 6: BSNE_Loss=0.8267,local_loss: 0.02738021924449194,global_loss: 0.7993071675300598\n",
      "Step 7: BSNE_Loss=0.5365,local_loss: 0.02231289818386229,global_loss: 0.5142182111740112\n",
      "Step 8: BSNE_Loss=0.6280,local_loss: 0.014995865025109852,global_loss: 0.6130223870277405\n",
      "Step 9: BSNE_Loss=0.7524,local_loss: 0.018452314370005638,global_loss: 0.7339079976081848\n",
      "Step 10: BSNE_Loss=0.5762,local_loss: 0.0276396918378509,global_loss: 0.5485165119171143\n",
      "Step 11: BSNE_Loss=0.7434,local_loss: 0.013806412766201532,global_loss: 0.7295485734939575\n",
      "Step 12: BSNE_Loss=0.7109,local_loss: 0.021520321919238562,global_loss: 0.689390242099762\n",
      "Step 13: BSNE_Loss=0.6109,local_loss: 0.02316996776426107,global_loss: 0.5877360105514526\n",
      "Step 14: BSNE_Loss=0.7140,local_loss: 0.020148196414518288,global_loss: 0.6938225030899048\n",
      "Step 15: BSNE_Loss=0.6399,local_loss: 0.019050107799339636,global_loss: 0.6208916902542114\n",
      "Step 16: BSNE_Loss=0.5393,local_loss: 0.017416918803501703,global_loss: 0.5218366980552673\n",
      "Step 17: BSNE_Loss=0.7621,local_loss: 0.017994501977914194,global_loss: 0.7441233396530151\n",
      "Step 18: BSNE_Loss=0.6628,local_loss: 0.017294521095728658,global_loss: 0.6454726457595825\n",
      "Step 19: BSNE_Loss=0.8296,local_loss: 0.01034736645021202,global_loss: 0.8193017244338989\n",
      "Step 20: BSNE_Loss=0.6085,local_loss: 0.019643197741276024,global_loss: 0.5888137221336365\n",
      "Step 21: BSNE_Loss=0.5124,local_loss: 0.024226470713355652,global_loss: 0.488166868686676\n",
      "Step 22: BSNE_Loss=0.5249,local_loss: 0.027179195043002057,global_loss: 0.49772414565086365\n",
      "Step 23: BSNE_Loss=0.5459,local_loss: 0.029954084382889712,global_loss: 0.5159772634506226\n",
      "Step 24: BSNE_Loss=0.5394,local_loss: 0.02786562409232482,global_loss: 0.5115545988082886\n",
      "Step 25: BSNE_Loss=0.6841,local_loss: 0.014644384508496257,global_loss: 0.6694586277008057\n",
      "Step 26: BSNE_Loss=0.5962,local_loss: 0.025924078438826055,global_loss: 0.57024747133255\n",
      "Step 27: BSNE_Loss=0.5315,local_loss: 0.02769586287519302,global_loss: 0.5037687420845032\n",
      "Step 28: BSNE_Loss=0.5146,local_loss: 0.028733807154271675,global_loss: 0.48582160472869873\n",
      "Step 29: BSNE_Loss=0.5212,local_loss: 0.030601604137994756,global_loss: 0.490584135055542\n",
      "Step 30: BSNE_Loss=0.6052,local_loss: 0.02586591034073022,global_loss: 0.5793176889419556\n",
      "Step 31: BSNE_Loss=0.6508,local_loss: 0.02162850530337336,global_loss: 0.6291769742965698\n",
      "Step 32: BSNE_Loss=0.6396,local_loss: 0.02146189090336442,global_loss: 0.6181568503379822\n",
      "Step 33: BSNE_Loss=0.4450,local_loss: 0.0320910568228345,global_loss: 0.41294416785240173\n",
      "Step 34: BSNE_Loss=0.3408,local_loss: 0.03944279179934049,global_loss: 0.3013862073421478\n",
      "Step 35: BSNE_Loss=0.5164,local_loss: 0.032704859462645244,global_loss: 0.4837045669555664\n",
      "Step 36: BSNE_Loss=0.4163,local_loss: 0.03487434195950272,global_loss: 0.38140159845352173\n",
      "Step 37: BSNE_Loss=0.5571,local_loss: 0.030876769644849162,global_loss: 0.5261920690536499\n",
      "Step 38: BSNE_Loss=0.4686,local_loss: 0.0343405876378616,global_loss: 0.43430930376052856\n",
      "Step 39: BSNE_Loss=0.3146,local_loss: 0.04710104923813689,global_loss: 0.2674950957298279\n",
      "Step 40: BSNE_Loss=0.2625,local_loss: 0.043836376487331595,global_loss: 0.21863052248954773\n",
      "Step 41: BSNE_Loss=0.2966,local_loss: 0.04904210816774387,global_loss: 0.2475711554288864\n",
      "Step 42: BSNE_Loss=0.5090,local_loss: 0.038238149777976266,global_loss: 0.4707574248313904\n",
      "Step 43: BSNE_Loss=0.3581,local_loss: 0.045327369039675325,global_loss: 0.3127361834049225\n",
      "Step 44: BSNE_Loss=0.3400,local_loss: 0.04680740867729546,global_loss: 0.29315614700317383\n",
      "Step 45: BSNE_Loss=0.3539,local_loss: 0.055964391080858836,global_loss: 0.2979062795639038\n",
      "Step 46: BSNE_Loss=0.4141,local_loss: 0.04686980729141593,global_loss: 0.36721599102020264\n",
      "Step 47: BSNE_Loss=0.4475,local_loss: 0.04393486838757871,global_loss: 0.40358802676200867\n",
      "Step 48: BSNE_Loss=0.2289,local_loss: 0.06211010620902324,global_loss: 0.16677731275558472\n",
      "Step 49: BSNE_Loss=0.2047,local_loss: 0.05457218211044618,global_loss: 0.15009705722332\n",
      "Step 50: BSNE_Loss=0.2708,local_loss: 0.05886094727050356,global_loss: 0.2119482159614563\n",
      "Step 51: BSNE_Loss=0.5591,local_loss: 0.038296930452914865,global_loss: 0.5208340883255005\n",
      "Step 52: BSNE_Loss=0.3170,local_loss: 0.052172611364080024,global_loss: 0.2648625671863556\n",
      "Step 53: BSNE_Loss=0.4152,local_loss: 0.05682809110964977,global_loss: 0.3583739101886749\n",
      "Step 54: BSNE_Loss=0.2424,local_loss: 0.07574127637292583,global_loss: 0.16663594543933868\n",
      "Step 55: BSNE_Loss=0.1240,local_loss: 0.07559940176800532,global_loss: 0.04844684898853302\n",
      "Step 56: BSNE_Loss=0.1953,local_loss: 0.08059497856596173,global_loss: 0.1146789938211441\n",
      "Step 57: BSNE_Loss=0.1650,local_loss: 0.07439589382157877,global_loss: 0.09056570380926132\n",
      "Step 58: BSNE_Loss=0.3354,local_loss: 0.06517458887530611,global_loss: 0.2702353596687317\n",
      "Step 59: BSNE_Loss=0.4019,local_loss: 0.0658214132634399,global_loss: 0.3360636830329895\n",
      "Step 60: BSNE_Loss=0.2830,local_loss: 0.0694026127804048,global_loss: 0.21364711225032806\n",
      "Step 61: BSNE_Loss=0.4417,local_loss: 0.05870753116819237,global_loss: 0.3829602599143982\n",
      "Step 62: BSNE_Loss=0.1216,local_loss: 0.07340539135170995,global_loss: 0.04821241274476051\n",
      "Step 63: BSNE_Loss=0.3514,local_loss: 0.07497439413095797,global_loss: 0.27643075585365295\n",
      "Step 64: BSNE_Loss=0.3202,local_loss: 0.07084085854354824,global_loss: 0.24934536218643188\n",
      "Step 65: BSNE_Loss=0.3831,local_loss: 0.06867309891261586,global_loss: 0.3144298195838928\n",
      "Step 66: BSNE_Loss=0.1049,local_loss: 0.08259172141532882,global_loss: 0.022322461009025574\n",
      "Step 67: BSNE_Loss=0.2308,local_loss: 0.07899953627046788,global_loss: 0.15181270241737366\n",
      "Step 68: BSNE_Loss=0.2069,local_loss: 0.08123518469398097,global_loss: 0.12568944692611694\n",
      "Step 69: BSNE_Loss=0.1450,local_loss: 0.0804945118988224,global_loss: 0.0644611194729805\n",
      "Step 70: BSNE_Loss=0.2321,local_loss: 0.08474957502272411,global_loss: 0.14737161993980408\n",
      "Step 71: BSNE_Loss=0.3394,local_loss: 0.06624032333362191,global_loss: 0.27317309379577637\n",
      "Step 72: BSNE_Loss=0.1240,local_loss: 0.08269911061264743,global_loss: 0.04128464683890343\n",
      "Step 73: BSNE_Loss=0.3400,local_loss: 0.07236486439769275,global_loss: 0.2676827609539032\n",
      "Step 74: BSNE_Loss=-0.1832,local_loss: 0.09187983887481424,global_loss: -0.2750622034072876\n",
      "Center node 5741 finished at step 75 with loss -0.1832\n",
      "\n",
      "=== Pretraining Epoch 28 (Center Node: 10085) ===\n",
      "Step 0: BSNE_Loss=0.6724,local_loss: 0.0606297990433341,global_loss: 0.6117764115333557\n",
      "Step 1: BSNE_Loss=0.8744,local_loss: 0.05518714631651707,global_loss: 0.819180965423584\n",
      "Step 2: BSNE_Loss=0.5645,local_loss: 0.057163729514003975,global_loss: 0.5073674917221069\n",
      "Step 3: BSNE_Loss=0.7666,local_loss: 0.05532043939545796,global_loss: 0.7112717628479004\n",
      "Step 4: BSNE_Loss=0.8634,local_loss: 0.053515769311859925,global_loss: 0.809849202632904\n",
      "Step 5: BSNE_Loss=0.7627,local_loss: 0.07202019739630533,global_loss: 0.6906836032867432\n",
      "Step 6: BSNE_Loss=0.5997,local_loss: 0.06860239074209395,global_loss: 0.531069815158844\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 7: BSNE_Loss=0.7919,local_loss: 0.061647680207860475,global_loss: 0.7302296757698059\n",
      "Step 8: BSNE_Loss=0.5020,local_loss: 0.05445015426296361,global_loss: 0.4475536346435547\n",
      "Step 9: BSNE_Loss=0.8149,local_loss: 0.05536413901060196,global_loss: 0.7595346570014954\n",
      "Step 10: BSNE_Loss=0.7747,local_loss: 0.062172931213585726,global_loss: 0.7125508189201355\n",
      "Step 11: BSNE_Loss=0.5073,local_loss: 0.054528922891997705,global_loss: 0.4527907967567444\n",
      "Step 12: BSNE_Loss=0.6876,local_loss: 0.05633474102568732,global_loss: 0.631243884563446\n",
      "Step 13: BSNE_Loss=0.6716,local_loss: 0.05862364230840003,global_loss: 0.6130250096321106\n",
      "Step 14: BSNE_Loss=0.6266,local_loss: 0.05359238888604151,global_loss: 0.5730480551719666\n",
      "Step 15: BSNE_Loss=0.8781,local_loss: 0.051273411003462006,global_loss: 0.8268686532974243\n",
      "Step 16: BSNE_Loss=0.9550,local_loss: 0.050025165712256854,global_loss: 0.9049351811408997\n",
      "Step 17: BSNE_Loss=0.5503,local_loss: 0.04430884038643245,global_loss: 0.5059890747070312\n",
      "Step 18: BSNE_Loss=0.7186,local_loss: 0.045123667859740535,global_loss: 0.673449695110321\n",
      "Step 19: BSNE_Loss=0.6382,local_loss: 0.0500334658179774,global_loss: 0.588211715221405\n",
      "Step 20: BSNE_Loss=0.5995,local_loss: 0.04724123283658234,global_loss: 0.5522713661193848\n",
      "Step 21: BSNE_Loss=0.5965,local_loss: 0.04146620879332781,global_loss: 0.555051326751709\n",
      "Step 22: BSNE_Loss=0.7842,local_loss: 0.038129923794984304,global_loss: 0.7460542321205139\n",
      "Step 23: BSNE_Loss=0.5776,local_loss: 0.028279041378148292,global_loss: 0.5493108034133911\n",
      "Step 24: BSNE_Loss=0.6247,local_loss: 0.03332281765554234,global_loss: 0.5913947224617004\n",
      "Step 25: BSNE_Loss=0.7050,local_loss: 0.0329366571651287,global_loss: 0.6720240116119385\n",
      "Step 26: BSNE_Loss=0.6440,local_loss: 0.033493403906573474,global_loss: 0.6104693412780762\n",
      "Step 27: BSNE_Loss=0.5434,local_loss: 0.02353087971023961,global_loss: 0.5198419094085693\n",
      "Step 28: BSNE_Loss=0.5475,local_loss: 0.021475060650366702,global_loss: 0.5260603427886963\n",
      "Step 29: BSNE_Loss=0.5738,local_loss: 0.015789018342474478,global_loss: 0.558018147945404\n",
      "Step 30: BSNE_Loss=0.4764,local_loss: 0.02444212930230256,global_loss: 0.45191681385040283\n",
      "Step 31: BSNE_Loss=0.5079,local_loss: 0.02108687797160394,global_loss: 0.4867655038833618\n",
      "Step 32: BSNE_Loss=0.5161,local_loss: 0.020309033946470927,global_loss: 0.4957576394081116\n",
      "Step 33: BSNE_Loss=0.4936,local_loss: 0.024111378074889528,global_loss: 0.46947771310806274\n",
      "Step 34: BSNE_Loss=0.5397,local_loss: 0.018320821076351503,global_loss: 0.5213911533355713\n",
      "Step 35: BSNE_Loss=0.4785,local_loss: 0.024703928075587547,global_loss: 0.4537529945373535\n",
      "Step 36: BSNE_Loss=0.5196,local_loss: 0.020679627510904176,global_loss: 0.4989436864852905\n",
      "Step 37: BSNE_Loss=0.4954,local_loss: 0.024679869811724275,global_loss: 0.47072863578796387\n",
      "Step 38: BSNE_Loss=0.5063,local_loss: 0.01967798103659837,global_loss: 0.4866032004356384\n",
      "Step 39: BSNE_Loss=0.5509,local_loss: 0.022402603460630846,global_loss: 0.5285239815711975\n",
      "Step 40: BSNE_Loss=0.4831,local_loss: 0.02198992152712543,global_loss: 0.46108415722846985\n",
      "Step 41: BSNE_Loss=0.5507,local_loss: 0.016272116735472388,global_loss: 0.5344362258911133\n",
      "Step 42: BSNE_Loss=0.5226,local_loss: 0.01880759858937107,global_loss: 0.5037989616394043\n",
      "Step 43: BSNE_Loss=0.3897,local_loss: 0.03183245045632811,global_loss: 0.357878714799881\n",
      "Step 44: BSNE_Loss=0.6228,local_loss: 0.018418271085085054,global_loss: 0.6043936014175415\n",
      "Step 45: BSNE_Loss=0.5264,local_loss: 0.019116418863803917,global_loss: 0.5072576403617859\n",
      "Step 46: BSNE_Loss=0.5340,local_loss: 0.02040647466489317,global_loss: 0.5135490298271179\n",
      "Step 47: BSNE_Loss=0.4169,local_loss: 0.027724427514545433,global_loss: 0.38918909430503845\n",
      "Step 48: BSNE_Loss=0.5751,local_loss: 0.015824864128272807,global_loss: 0.5593216419219971\n",
      "Step 49: BSNE_Loss=0.5451,local_loss: 0.015618218337723429,global_loss: 0.5294445157051086\n",
      "Step 50: BSNE_Loss=0.5450,local_loss: 0.018480759412470505,global_loss: 0.526559591293335\n",
      "Step 51: BSNE_Loss=0.4924,local_loss: 0.025948928581863893,global_loss: 0.46642985939979553\n",
      "Step 52: BSNE_Loss=0.4698,local_loss: 0.02312332397151575,global_loss: 0.44670578837394714\n",
      "Step 53: BSNE_Loss=0.4007,local_loss: 0.029811149197495675,global_loss: 0.37088608741760254\n",
      "Step 54: BSNE_Loss=0.4596,local_loss: 0.026753519446966696,global_loss: 0.4328268766403198\n",
      "Step 55: BSNE_Loss=0.4899,local_loss: 0.023766647634385206,global_loss: 0.46614986658096313\n",
      "Step 56: BSNE_Loss=0.5596,local_loss: 0.018218528485521183,global_loss: 0.541375994682312\n",
      "Step 57: BSNE_Loss=0.5312,local_loss: 0.019238553624000215,global_loss: 0.5120033025741577\n",
      "Step 58: BSNE_Loss=0.4530,local_loss: 0.02501440660495666,global_loss: 0.4280183017253876\n",
      "Step 59: BSNE_Loss=0.5375,local_loss: 0.01656449692849415,global_loss: 0.520984947681427\n",
      "Step 60: BSNE_Loss=0.5583,local_loss: 0.018527937601797204,global_loss: 0.5397745370864868\n",
      "Step 61: BSNE_Loss=0.4591,local_loss: 0.027936905158816375,global_loss: 0.4311891198158264\n",
      "Step 62: BSNE_Loss=0.5489,local_loss: 0.024116271062678413,global_loss: 0.5248019099235535\n",
      "Step 63: BSNE_Loss=0.3540,local_loss: 0.03388306995778371,global_loss: 0.3200823962688446\n",
      "Step 64: BSNE_Loss=0.5059,local_loss: 0.020587689557040253,global_loss: 0.4853145480155945\n",
      "Step 65: BSNE_Loss=0.4809,local_loss: 0.023812788221956427,global_loss: 0.4571341276168823\n",
      "Step 66: BSNE_Loss=0.3375,local_loss: 0.03334587937975197,global_loss: 0.3042036294937134\n",
      "Step 67: BSNE_Loss=0.5256,local_loss: 0.023409143521122642,global_loss: 0.502228856086731\n",
      "Step 68: BSNE_Loss=0.4329,local_loss: 0.02536518477937301,global_loss: 0.40754005312919617\n",
      "Step 69: BSNE_Loss=0.4401,local_loss: 0.024409341552003902,global_loss: 0.41572123765945435\n",
      "Step 70: BSNE_Loss=0.3118,local_loss: 0.037216972950533084,global_loss: 0.27455127239227295\n",
      "Step 71: BSNE_Loss=0.4687,local_loss: 0.027792699074704098,global_loss: 0.44090691208839417\n",
      "Step 72: BSNE_Loss=0.4669,local_loss: 0.030251947731967283,global_loss: 0.43667325377464294\n",
      "Step 73: BSNE_Loss=0.4130,local_loss: 0.02996952700697126,global_loss: 0.3830520212650299\n",
      "Step 74: BSNE_Loss=0.5317,local_loss: 0.021603765519328452,global_loss: 0.5101163387298584\n",
      "Step 75: BSNE_Loss=0.4756,local_loss: 0.021674166264835915,global_loss: 0.4539420008659363\n",
      "Step 76: BSNE_Loss=0.3521,local_loss: 0.03410538934318433,global_loss: 0.3180420994758606\n",
      "Step 77: BSNE_Loss=0.3913,local_loss: 0.02986896603193772,global_loss: 0.3614548444747925\n",
      "Step 78: BSNE_Loss=0.4093,local_loss: 0.02786696432222487,global_loss: 0.3814155161380768\n",
      "Step 79: BSNE_Loss=0.4112,local_loss: 0.03114891981532027,global_loss: 0.38009846210479736\n",
      "Step 80: BSNE_Loss=0.3858,local_loss: 0.03489400898087267,global_loss: 0.35086047649383545\n",
      "Step 81: BSNE_Loss=0.4312,local_loss: 0.04185110439097353,global_loss: 0.38939157128334045\n",
      "Step 82: BSNE_Loss=0.4128,local_loss: 0.03168267177115491,global_loss: 0.38114920258522034\n",
      "Step 83: BSNE_Loss=0.4124,local_loss: 0.03264407108602935,global_loss: 0.3797728419303894\n",
      "Step 84: BSNE_Loss=0.3933,local_loss: 0.033998633688688426,global_loss: 0.3593013286590576\n",
      "Step 85: BSNE_Loss=0.4878,local_loss: 0.030000473590360892,global_loss: 0.4577607214450836\n",
      "Step 86: BSNE_Loss=0.4442,local_loss: 0.03269596748360849,global_loss: 0.411526620388031\n",
      "Step 87: BSNE_Loss=0.4053,local_loss: 0.03647415923257053,global_loss: 0.3688490092754364\n",
      "Step 88: BSNE_Loss=0.2735,local_loss: 0.04149949118146938,global_loss: 0.232008695602417\n",
      "Step 89: BSNE_Loss=0.3417,local_loss: 0.03457240423281416,global_loss: 0.3071431517601013\n",
      "Step 90: BSNE_Loss=0.3535,local_loss: 0.033176441229303716,global_loss: 0.3202846646308899\n",
      "Step 91: BSNE_Loss=0.3119,local_loss: 0.03363079038239353,global_loss: 0.2782438099384308\n",
      "Step 92: BSNE_Loss=0.2980,local_loss: 0.03934161874366351,global_loss: 0.2586154043674469\n",
      "Step 93: BSNE_Loss=0.2959,local_loss: 0.03740971049148728,global_loss: 0.25846031308174133\n",
      "Step 94: BSNE_Loss=0.3040,local_loss: 0.042488466201578624,global_loss: 0.2614905834197998\n",
      "Step 95: BSNE_Loss=0.4577,local_loss: 0.031094778881094187,global_loss: 0.42659878730773926\n",
      "Step 96: BSNE_Loss=0.2454,local_loss: 0.04238030966304067,global_loss: 0.20301608741283417\n",
      "Step 97: BSNE_Loss=0.5346,local_loss: 0.021552544778985237,global_loss: 0.5130124092102051\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 98: BSNE_Loss=0.4057,local_loss: 0.029726723754067118,global_loss: 0.37602224946022034\n",
      "Step 99: BSNE_Loss=0.2385,local_loss: 0.04255466708991146,global_loss: 0.19593597948551178\n",
      "Step 100: BSNE_Loss=0.5154,local_loss: 0.03476880793586848,global_loss: 0.4806772470474243\n",
      "Step 101: BSNE_Loss=0.4084,local_loss: 0.03347462492079607,global_loss: 0.3749501407146454\n",
      "Step 102: BSNE_Loss=0.4765,local_loss: 0.033547775092736015,global_loss: 0.4429562985897064\n",
      "Step 103: BSNE_Loss=0.3076,local_loss: 0.034676030391052444,global_loss: 0.272889643907547\n",
      "Step 104: BSNE_Loss=0.5517,local_loss: 0.02090957108822772,global_loss: 0.5308122038841248\n",
      "Step 105: BSNE_Loss=0.3076,local_loss: 0.0352066095415923,global_loss: 0.2723625898361206\n",
      "Step 106: BSNE_Loss=0.4951,local_loss: 0.03268230159420097,global_loss: 0.46239274740219116\n",
      "Step 107: BSNE_Loss=0.4633,local_loss: 0.029484567771087232,global_loss: 0.4338197112083435\n",
      "Step 108: BSNE_Loss=0.4499,local_loss: 0.03286489429590746,global_loss: 0.41705799102783203\n",
      "Step 109: BSNE_Loss=0.2961,local_loss: 0.03787238429246821,global_loss: 0.2582765519618988\n",
      "Step 110: BSNE_Loss=0.4559,local_loss: 0.03313952980178712,global_loss: 0.42277002334594727\n",
      "Step 111: BSNE_Loss=0.4664,local_loss: 0.03355411219028417,global_loss: 0.43280914425849915\n",
      "Step 112: BSNE_Loss=0.2883,local_loss: 0.043929801027647056,global_loss: 0.2443353235721588\n",
      "Step 113: BSNE_Loss=0.5614,local_loss: 0.029066705878171645,global_loss: 0.5323560237884521\n",
      "Step 114: BSNE_Loss=0.2633,local_loss: 0.043018342376036606,global_loss: 0.2202989012002945\n",
      "Step 115: BSNE_Loss=0.3210,local_loss: 0.04137426418753569,global_loss: 0.27958905696868896\n",
      "Step 116: BSNE_Loss=0.4783,local_loss: 0.02901764727723249,global_loss: 0.44927921891212463\n",
      "Step 117: BSNE_Loss=0.4844,local_loss: 0.030548721914917937,global_loss: 0.4538837671279907\n",
      "Step 118: BSNE_Loss=0.5348,local_loss: 0.025546572656645318,global_loss: 0.5092236995697021\n",
      "Step 119: BSNE_Loss=0.5003,local_loss: 0.034029719271929784,global_loss: 0.46626347303390503\n",
      "Step 120: BSNE_Loss=0.3689,local_loss: 0.03477274953245714,global_loss: 0.33417055010795593\n",
      "Step 121: BSNE_Loss=0.3450,local_loss: 0.034114254303033734,global_loss: 0.31090742349624634\n",
      "Step 122: BSNE_Loss=0.1792,local_loss: 0.0506463373535321,global_loss: 0.12854896485805511\n",
      "Step 123: BSNE_Loss=0.4868,local_loss: 0.03011575476946053,global_loss: 0.45665159821510315\n",
      "Step 124: BSNE_Loss=0.3824,local_loss: 0.03194378393669074,global_loss: 0.3504132032394409\n",
      "Step 125: BSNE_Loss=0.4571,local_loss: 0.04030811726027028,global_loss: 0.4167602062225342\n",
      "Step 126: BSNE_Loss=0.7932,local_loss: 0.022006009302410794,global_loss: 0.7712414264678955\n",
      "Step 127: BSNE_Loss=0.5588,local_loss: 0.029268201731674263,global_loss: 0.5294889807701111\n",
      "Step 128: BSNE_Loss=0.4149,local_loss: 0.0297055374236489,global_loss: 0.3852078318595886\n",
      "Step 129: BSNE_Loss=0.3280,local_loss: 0.03622796452100639,global_loss: 0.29173198342323303\n",
      "Step 130: BSNE_Loss=0.4127,local_loss: 0.026285739878750332,global_loss: 0.38642358779907227\n",
      "Step 131: BSNE_Loss=0.3616,local_loss: 0.03252726853413408,global_loss: 0.32905954122543335\n",
      "Step 132: BSNE_Loss=0.2914,local_loss: 0.04931844868388609,global_loss: 0.24207279086112976\n",
      "Step 133: BSNE_Loss=0.2416,local_loss: 0.055746991960359864,global_loss: 0.185881108045578\n",
      "Step 134: BSNE_Loss=0.4703,local_loss: 0.03002818231432184,global_loss: 0.4402513802051544\n",
      "Step 135: BSNE_Loss=0.3275,local_loss: 0.03890447718848648,global_loss: 0.2885885536670685\n",
      "Step 136: BSNE_Loss=0.3671,local_loss: 0.03804524470353142,global_loss: 0.3290625512599945\n",
      "Step 137: BSNE_Loss=0.3357,local_loss: 0.039282472120670646,global_loss: 0.29639333486557007\n",
      "Step 138: BSNE_Loss=0.5772,local_loss: 0.030993265967343628,global_loss: 0.5461903810501099\n",
      "Step 139: BSNE_Loss=0.2021,local_loss: 0.039147981117378564,global_loss: 0.1629233956336975\n",
      "Step 140: BSNE_Loss=0.4387,local_loss: 0.03961287529873597,global_loss: 0.3991086483001709\n",
      "Step 141: BSNE_Loss=0.3618,local_loss: 0.03488959834303142,global_loss: 0.3269522786140442\n",
      "Step 142: BSNE_Loss=0.2274,local_loss: 0.04330312377331141,global_loss: 0.18405161798000336\n",
      "Step 143: BSNE_Loss=0.2651,local_loss: 0.04681817513815109,global_loss: 0.21827512979507446\n",
      "Step 144: BSNE_Loss=0.3763,local_loss: 0.027053691096330244,global_loss: 0.3492484986782074\n",
      "Step 145: BSNE_Loss=0.3551,local_loss: 0.04020297968112428,global_loss: 0.3148505985736847\n",
      "Step 146: BSNE_Loss=0.3286,local_loss: 0.03389127116708456,global_loss: 0.29472848773002625\n",
      "Step 147: BSNE_Loss=0.1525,local_loss: 0.04665502748064507,global_loss: 0.10587852448225021\n",
      "Step 148: BSNE_Loss=0.1249,local_loss: 0.05128307880830802,global_loss: 0.07357001304626465\n",
      "Step 149: BSNE_Loss=0.3420,local_loss: 0.04245096602584098,global_loss: 0.29951801896095276\n",
      "Step 150: BSNE_Loss=0.0505,local_loss: 0.05306390989543319,global_loss: -0.0025780024006962776\n",
      "Step 151: BSNE_Loss=0.2125,local_loss: 0.04041785803441543,global_loss: 0.1721198558807373\n",
      "Step 152: BSNE_Loss=0.4252,local_loss: 0.04279924792406149,global_loss: 0.3823989927768707\n",
      "Step 153: BSNE_Loss=0.5552,local_loss: 0.038430273756582325,global_loss: 0.5167821645736694\n",
      "Step 154: BSNE_Loss=0.4597,local_loss: 0.03171489716245499,global_loss: 0.4279870390892029\n",
      "Step 155: BSNE_Loss=0.3114,local_loss: 0.04794128638042106,global_loss: 0.2634953260421753\n",
      "Step 156: BSNE_Loss=0.2883,local_loss: 0.046249060802548515,global_loss: 0.2420039176940918\n",
      "Step 157: BSNE_Loss=0.4725,local_loss: 0.0433578150294425,global_loss: 0.42911162972450256\n",
      "Step 158: BSNE_Loss=0.1914,local_loss: 0.061713289371643824,global_loss: 0.12963955104351044\n",
      "Step 159: BSNE_Loss=0.2195,local_loss: 0.04617379187924538,global_loss: 0.17334239184856415\n",
      "Step 160: BSNE_Loss=0.2289,local_loss: 0.043388577696995737,global_loss: 0.185516357421875\n",
      "Step 161: BSNE_Loss=0.3566,local_loss: 0.04266471443000584,global_loss: 0.313912957906723\n",
      "Step 162: BSNE_Loss=0.3126,local_loss: 0.045726096980943924,global_loss: 0.2668246030807495\n",
      "Step 163: BSNE_Loss=0.3460,local_loss: 0.03644384077549571,global_loss: 0.3095386028289795\n",
      "Step 164: BSNE_Loss=0.3118,local_loss: 0.05613557275137569,global_loss: 0.2556294798851013\n",
      "Step 165: BSNE_Loss=0.1646,local_loss: 0.05604050095764167,global_loss: 0.10851800441741943\n",
      "Step 166: BSNE_Loss=0.5458,local_loss: 0.028806223964168202,global_loss: 0.5169475674629211\n",
      "Step 167: BSNE_Loss=0.2447,local_loss: 0.04404192819342559,global_loss: 0.20068980753421783\n",
      "Step 168: BSNE_Loss=0.0393,local_loss: 0.04791152078635385,global_loss: -0.008610856719315052\n",
      "Center node 10085 finished at step 169 with loss 0.0393\n",
      "\n",
      "=== Pretraining Epoch 29 (Center Node: 4512) ===\n",
      "Step 0: BSNE_Loss=0.1755,local_loss: 0.0453441302090435,global_loss: 0.13019835948944092\n",
      "Step 1: BSNE_Loss=0.3790,local_loss: 0.03766500864879149,global_loss: 0.34136661887168884\n",
      "Step 2: BSNE_Loss=0.2894,local_loss: 0.03716004216880617,global_loss: 0.252230703830719\n",
      "Step 3: BSNE_Loss=0.3708,local_loss: 0.04674592072231031,global_loss: 0.32404136657714844\n",
      "Step 4: BSNE_Loss=0.2978,local_loss: 0.03689740520290692,global_loss: 0.2608603239059448\n",
      "Step 5: BSNE_Loss=0.2965,local_loss: 0.04784715043389552,global_loss: 0.24860458076000214\n",
      "Step 6: BSNE_Loss=0.1736,local_loss: 0.0502181820433167,global_loss: 0.12333229929208755\n",
      "Step 7: BSNE_Loss=0.2051,local_loss: 0.0632576257089473,global_loss: 0.14185945689678192\n",
      "Step 8: BSNE_Loss=0.2273,local_loss: 0.048015869251017904,global_loss: 0.17928709089756012\n",
      "Step 9: BSNE_Loss=0.0704,local_loss: 0.07053500419281894,global_loss: -0.0001806780055630952\n",
      "Step 10: BSNE_Loss=0.1842,local_loss: 0.047788064025293266,global_loss: 0.13643240928649902\n",
      "Step 11: BSNE_Loss=0.2062,local_loss: 0.03809360030029116,global_loss: 0.16807612776756287\n",
      "Step 12: BSNE_Loss=0.4287,local_loss: 0.04773400866407906,global_loss: 0.3809777796268463\n",
      "Step 13: BSNE_Loss=0.3744,local_loss: 0.06081562218136755,global_loss: 0.313607394695282\n",
      "Step 14: BSNE_Loss=0.4266,local_loss: 0.046196637165319576,global_loss: 0.3804325461387634\n",
      "Step 15: BSNE_Loss=0.2175,local_loss: 0.044844919407445616,global_loss: 0.17269285023212433\n",
      "Step 16: BSNE_Loss=0.4173,local_loss: 0.037256770385882945,global_loss: 0.38005053997039795\n",
      "Step 17: BSNE_Loss=0.3123,local_loss: 0.07083253357638894,global_loss: 0.24147875607013702\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 18: BSNE_Loss=-0.0371,local_loss: 0.07448367536300249,global_loss: -0.11162269860506058\n",
      "Center node 4512 finished at step 19 with loss -0.0371\n",
      "\n",
      "=== Pretraining Epoch 30 (Center Node: 3115) ===\n",
      "Step 0: BSNE_Loss=-0.1752,local_loss: 0.03177727119884012,global_loss: -0.20693616569042206\n",
      "Center node 3115 finished at step 1 with loss -0.1752\n",
      "\n",
      "=== Pretraining Epoch 31 (Center Node: 11112) ===\n",
      "Step 0: BSNE_Loss=0.1517,local_loss: 0.06027140695053078,global_loss: 0.09139453619718552\n",
      "Step 1: BSNE_Loss=0.0644,local_loss: 0.050045514136016864,global_loss: 0.014371813274919987\n",
      "Step 2: BSNE_Loss=0.1444,local_loss: 0.06242546030014172,global_loss: 0.081940658390522\n",
      "Step 3: BSNE_Loss=0.2393,local_loss: 0.06997407452819382,global_loss: 0.16933582723140717\n",
      "Step 4: BSNE_Loss=-0.1139,local_loss: 0.06570574124943693,global_loss: -0.17962969839572906\n",
      "Center node 11112 finished at step 5 with loss -0.1139\n",
      "\n",
      "=== Pretraining Epoch 32 (Center Node: 5476) ===\n",
      "Step 0: BSNE_Loss=0.2049,local_loss: 0.06961701697445968,global_loss: 0.13529779016971588\n",
      "Step 1: BSNE_Loss=0.2761,local_loss: 0.08843584211153256,global_loss: 0.18768639862537384\n",
      "Step 2: BSNE_Loss=0.2043,local_loss: 0.07747341765834062,global_loss: 0.12682346999645233\n",
      "Step 3: BSNE_Loss=0.3942,local_loss: 0.07268849207050047,global_loss: 0.3215027451515198\n",
      "Step 4: BSNE_Loss=0.4229,local_loss: 0.05577782141152103,global_loss: 0.36708131432533264\n",
      "Step 5: BSNE_Loss=0.2945,local_loss: 0.08007946685019124,global_loss: 0.21444542706012726\n",
      "Step 6: BSNE_Loss=0.3362,local_loss: 0.061198490820954736,global_loss: 0.27501994371414185\n",
      "Step 7: BSNE_Loss=0.3625,local_loss: 0.05818229242382876,global_loss: 0.3043607473373413\n",
      "Step 12: BSNE_Loss=0.3165,local_loss: 0.0633083612589076,global_loss: 0.2531532645225525\n",
      "Step 13: BSNE_Loss=0.0140,local_loss: 0.10059787193583197,global_loss: -0.08659074455499649\n",
      "Center node 5476 finished at step 14 with loss 0.0140\n",
      "\n",
      "=== Pretraining Epoch 33 (Center Node: 7563) ===\n",
      "Step 0: BSNE_Loss=0.4014,local_loss: 0.059753268888964914,global_loss: 0.3416142761707306\n",
      "Step 1: BSNE_Loss=0.2706,local_loss: 0.05168691780528667,global_loss: 0.21889421343803406\n",
      "Step 2: BSNE_Loss=0.6219,local_loss: 0.029185737611339076,global_loss: 0.5926975607872009\n",
      "Step 3: BSNE_Loss=0.5441,local_loss: 0.047120184394184986,global_loss: 0.4970179796218872\n",
      "Step 4: BSNE_Loss=0.7369,local_loss: 0.04398883406146499,global_loss: 0.6929468512535095\n",
      "Step 5: BSNE_Loss=0.8412,local_loss: 0.04005190613090176,global_loss: 0.8011192679405212\n",
      "Step 6: BSNE_Loss=0.7303,local_loss: 0.035295405471376225,global_loss: 0.6950191855430603\n",
      "Step 7: BSNE_Loss=0.4483,local_loss: 0.04984446460782978,global_loss: 0.3984092175960541\n",
      "Step 8: BSNE_Loss=0.5940,local_loss: 0.05589245084774946,global_loss: 0.5380867719650269\n",
      "Step 9: BSNE_Loss=0.4777,local_loss: 0.04892575889790443,global_loss: 0.4287360608577728\n",
      "Step 10: BSNE_Loss=0.7568,local_loss: 0.03751203427104022,global_loss: 0.7192675471305847\n",
      "Step 11: BSNE_Loss=0.5964,local_loss: 0.04982374766789475,global_loss: 0.546614944934845\n",
      "Step 12: BSNE_Loss=0.1804,local_loss: 0.044650184458828714,global_loss: 0.13573169708251953\n",
      "Step 13: BSNE_Loss=0.1866,local_loss: 0.03630139130985585,global_loss: 0.15031546354293823\n",
      "Step 14: BSNE_Loss=0.4281,local_loss: 0.039550203281689046,global_loss: 0.38859811425209045\n",
      "Step 15: BSNE_Loss=0.6155,local_loss: 0.04171120550389022,global_loss: 0.573837161064148\n",
      "Step 16: BSNE_Loss=0.2823,local_loss: 0.05276564369456804,global_loss: 0.22949086129665375\n",
      "Step 17: BSNE_Loss=0.4707,local_loss: 0.047635662982319076,global_loss: 0.4230203330516815\n",
      "Step 18: BSNE_Loss=0.0459,local_loss: 0.06055513988533705,global_loss: -0.014700711704790592\n",
      "Center node 7563 finished at step 19 with loss 0.0459\n",
      "\n",
      "=== Pretraining Epoch 34 (Center Node: 11448) ===\n",
      "Step 0: BSNE_Loss=0.4871,local_loss: 0.06311254245258385,global_loss: 0.424014151096344\n",
      "Step 1: BSNE_Loss=0.0880,local_loss: 0.09933114538981781,global_loss: -0.011340605095028877\n",
      "Step 2: BSNE_Loss=0.4346,local_loss: 0.08975794745462323,global_loss: 0.3448527455329895\n",
      "Step 3: BSNE_Loss=0.3981,local_loss: 0.08387608422694638,global_loss: 0.3141775131225586\n",
      "Step 4: BSNE_Loss=0.4868,local_loss: 0.05433299754914058,global_loss: 0.43246087431907654\n",
      "Step 5: BSNE_Loss=0.1534,local_loss: 0.07835335807877838,global_loss: 0.07506982237100601\n",
      "Step 6: BSNE_Loss=0.3392,local_loss: 0.06460325452722668,global_loss: 0.2746267020702362\n",
      "Step 7: BSNE_Loss=0.1059,local_loss: 0.07593954598015026,global_loss: 0.029948385432362556\n",
      "Step 8: BSNE_Loss=0.2483,local_loss: 0.07384876683507542,global_loss: 0.1744847446680069\n",
      "Step 9: BSNE_Loss=0.4560,local_loss: 0.05942691938241904,global_loss: 0.3965596556663513\n",
      "Step 10: BSNE_Loss=0.1769,local_loss: 0.0835799709539416,global_loss: 0.09335466474294662\n",
      "Step 11: BSNE_Loss=0.1038,local_loss: 0.064947087868096,global_loss: 0.03886539116501808\n",
      "Step 12: BSNE_Loss=0.1212,local_loss: 0.08148997478419472,global_loss: 0.039710115641355515\n",
      "Step 13: BSNE_Loss=0.0000,local_loss: 0.1109714237494373,global_loss: -0.1109381765127182\n",
      "Center node 11448 finished at step 14 with loss 0.0000\n",
      "\n",
      "=== Pretraining Epoch 35 (Center Node: 4416) ===\n",
      "Step 0: BSNE_Loss=0.2902,local_loss: 0.03545164843626088,global_loss: 0.2547367215156555\n",
      "Step 1: BSNE_Loss=0.3700,local_loss: 0.041677957519869455,global_loss: 0.32836246490478516\n",
      "Step 2: BSNE_Loss=0.2617,local_loss: 0.02745822578128463,global_loss: 0.23423729836940765\n",
      "Step 3: BSNE_Loss=-0.1549,local_loss: 0.03528985841290174,global_loss: -0.1902381032705307\n",
      "Center node 4416 finished at step 4 with loss -0.1549\n",
      "\n",
      "=== Pretraining Epoch 36 (Center Node: 387) ===\n",
      "Step 0: BSNE_Loss=0.4605,local_loss: 0.06837959541193503,global_loss: 0.39209598302841187\n",
      "Step 1: BSNE_Loss=0.1304,local_loss: 0.05094333101680052,global_loss: 0.07949855923652649\n",
      "Step 2: BSNE_Loss=0.4421,local_loss: 0.06364114763414522,global_loss: 0.37842699885368347\n",
      "Step 3: BSNE_Loss=0.4560,local_loss: 0.04867933787610476,global_loss: 0.40730106830596924\n",
      "Step 4: BSNE_Loss=0.3991,local_loss: 0.06062604624541312,global_loss: 0.33847931027412415\n",
      "Step 5: BSNE_Loss=0.3053,local_loss: 0.04645118816463563,global_loss: 0.25883784890174866\n",
      "Step 6: BSNE_Loss=0.4399,local_loss: 0.04112411882260299,global_loss: 0.3987480401992798\n",
      "Step 7: BSNE_Loss=0.3394,local_loss: 0.04900773812532654,global_loss: 0.2904120683670044\n",
      "Step 8: BSNE_Loss=0.3294,local_loss: 0.059443462763312804,global_loss: 0.2699183523654938\n",
      "Step 9: BSNE_Loss=0.3999,local_loss: 0.05478400976780593,global_loss: 0.34513238072395325\n",
      "Step 10: BSNE_Loss=0.2564,local_loss: 0.03566443850741669,global_loss: 0.22073596715927124\n",
      "Step 11: BSNE_Loss=0.2322,local_loss: 0.02814661246089143,global_loss: 0.20403581857681274\n",
      "Step 12: BSNE_Loss=0.2669,local_loss: 0.03713570860493223,global_loss: 0.22978059947490692\n",
      "Step 13: BSNE_Loss=0.2808,local_loss: 0.026441374719505313,global_loss: 0.25435760617256165\n",
      "Step 14: BSNE_Loss=0.1171,local_loss: 0.01872203842807068,global_loss: 0.09840160608291626\n",
      "Step 15: BSNE_Loss=0.5143,local_loss: 0.045171714414261696,global_loss: 0.46917247772216797\n",
      "Step 16: BSNE_Loss=0.3410,local_loss: 0.028577453141144564,global_loss: 0.31246218085289\n",
      "Step 17: BSNE_Loss=0.3102,local_loss: 0.031700546046228295,global_loss: 0.2785297632217407\n",
      "Step 18: BSNE_Loss=0.3711,local_loss: 0.028245685467484682,global_loss: 0.3428259491920471\n",
      "Step 19: BSNE_Loss=0.2257,local_loss: 0.017793532584981446,global_loss: 0.2078973948955536\n",
      "Step 20: BSNE_Loss=0.1969,local_loss: 0.022956576202003344,global_loss: 0.17393513023853302\n",
      "Step 21: BSNE_Loss=0.2294,local_loss: 0.018754314805779954,global_loss: 0.2106066793203354\n",
      "Step 22: BSNE_Loss=0.2406,local_loss: 0.019936033971619638,global_loss: 0.220615416765213\n",
      "Step 23: BSNE_Loss=0.3009,local_loss: 0.023057319351560263,global_loss: 0.277810662984848\n",
      "Step 24: BSNE_Loss=0.2012,local_loss: 0.02310856927445106,global_loss: 0.17809735238552094\n",
      "Step 25: BSNE_Loss=0.3425,local_loss: 0.028532201225536173,global_loss: 0.3140055239200592\n",
      "Step 26: BSNE_Loss=0.2940,local_loss: 0.03566177708318448,global_loss: 0.2582974433898926\n",
      "Step 27: BSNE_Loss=0.1862,local_loss: 0.0400473879666458,global_loss: 0.1461932212114334\n",
      "Step 28: BSNE_Loss=0.2903,local_loss: 0.03339331202480114,global_loss: 0.2569275498390198\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 29: BSNE_Loss=0.1752,local_loss: 0.051327588077949224,global_loss: 0.12389559298753738\n",
      "Step 30: BSNE_Loss=0.0959,local_loss: 0.03855531774323717,global_loss: 0.057352207601070404\n",
      "Step 31: BSNE_Loss=0.3113,local_loss: 0.042620057391647745,global_loss: 0.26872357726097107\n",
      "Step 32: BSNE_Loss=0.0210,local_loss: 0.03231262882397208,global_loss: -0.011272968724370003\n",
      "Center node 387 finished at step 33 with loss 0.0210\n",
      "\n",
      "=== Pretraining Epoch 37 (Center Node: 5672) ===\n",
      "Step 0: BSNE_Loss=0.4545,local_loss: 0.03277503185633948,global_loss: 0.4217461049556732\n",
      "Step 1: BSNE_Loss=0.4144,local_loss: 0.032886271410133894,global_loss: 0.38148340582847595\n",
      "Step 2: BSNE_Loss=0.4232,local_loss: 0.02925436876791594,global_loss: 0.39395952224731445\n",
      "Step 3: BSNE_Loss=0.3905,local_loss: 0.0495165533344496,global_loss: 0.34096720814704895\n",
      "Step 4: BSNE_Loss=0.5090,local_loss: 0.03227762752697565,global_loss: 0.47667354345321655\n",
      "Step 5: BSNE_Loss=0.6304,local_loss: 0.02854682488656573,global_loss: 0.6018447279930115\n",
      "Step 6: BSNE_Loss=0.3906,local_loss: 0.031321702919594775,global_loss: 0.3592931032180786\n",
      "Step 7: BSNE_Loss=0.3631,local_loss: 0.03478106591686911,global_loss: 0.328354150056839\n",
      "Step 8: BSNE_Loss=0.3380,local_loss: 0.03328218505593962,global_loss: 0.3046683669090271\n",
      "Step 9: BSNE_Loss=0.3933,local_loss: 0.031328904089416,global_loss: 0.36198538541793823\n",
      "Step 10: BSNE_Loss=0.0457,local_loss: 0.054164264378989246,global_loss: -0.008440610952675343\n",
      "Center node 5672 finished at step 11 with loss 0.0457\n",
      "\n",
      "=== Pretraining Epoch 38 (Center Node: 3745) ===\n",
      "Step 0: BSNE_Loss=0.4798,local_loss: 0.029450722142927977,global_loss: 0.45036113262176514\n",
      "Step 1: BSNE_Loss=0.2437,local_loss: 0.03624400454869519,global_loss: 0.20745283365249634\n",
      "Step 2: BSNE_Loss=0.6392,local_loss: 0.03315461744252608,global_loss: 0.6060230731964111\n",
      "Step 3: BSNE_Loss=0.4814,local_loss: 0.03210487294774857,global_loss: 0.44930288195610046\n",
      "Step 4: BSNE_Loss=0.2198,local_loss: 0.02991553268829581,global_loss: 0.18987461924552917\n",
      "Step 5: BSNE_Loss=0.2486,local_loss: 0.027872343423277112,global_loss: 0.22069256007671356\n",
      "Step 6: BSNE_Loss=0.3400,local_loss: 0.041576008316889665,global_loss: 0.2984655201435089\n",
      "Step 7: BSNE_Loss=0.7501,local_loss: 0.030215382634088496,global_loss: 0.7198395729064941\n",
      "Step 8: BSNE_Loss=0.5296,local_loss: 0.03462818767748545,global_loss: 0.494937539100647\n",
      "Step 9: BSNE_Loss=0.4050,local_loss: 0.03763504170307114,global_loss: 0.36738601326942444\n",
      "Step 10: BSNE_Loss=0.3149,local_loss: 0.03565696326771881,global_loss: 0.27921825647354126\n",
      "Step 11: BSNE_Loss=0.4322,local_loss: 0.03723869145467057,global_loss: 0.3949172794818878\n",
      "Step 12: BSNE_Loss=0.6322,local_loss: 0.02964216716897939,global_loss: 0.6025363206863403\n",
      "Step 13: BSNE_Loss=0.3191,local_loss: 0.03502831982850896,global_loss: 0.2840285301208496\n",
      "Step 14: BSNE_Loss=0.2833,local_loss: 0.04673490891454876,global_loss: 0.23652587831020355\n",
      "Step 15: BSNE_Loss=0.6944,local_loss: 0.028038281720634602,global_loss: 0.6663779616355896\n",
      "Step 16: BSNE_Loss=0.4404,local_loss: 0.043361560827448686,global_loss: 0.39702677726745605\n",
      "Step 17: BSNE_Loss=0.3344,local_loss: 0.030534895712577635,global_loss: 0.30390968918800354\n",
      "Step 18: BSNE_Loss=0.4530,local_loss: 0.03755464853730795,global_loss: 0.4154735803604126\n",
      "Step 19: BSNE_Loss=0.6094,local_loss: 0.03649175892862392,global_loss: 0.5728840827941895\n",
      "Step 20: BSNE_Loss=0.4938,local_loss: 0.03081468820247863,global_loss: 0.46303099393844604\n",
      "Step 21: BSNE_Loss=0.6050,local_loss: 0.031921081298910775,global_loss: 0.5730541348457336\n",
      "Step 22: BSNE_Loss=0.6207,local_loss: 0.029063978828089122,global_loss: 0.5915863513946533\n",
      "Step 23: BSNE_Loss=0.2212,local_loss: 0.029419615524376535,global_loss: 0.19182544946670532\n",
      "Step 24: BSNE_Loss=0.1197,local_loss: 0.045214442092417775,global_loss: 0.07445598393678665\n",
      "Step 25: BSNE_Loss=0.2815,local_loss: 0.044894857670633864,global_loss: 0.23662006855010986\n",
      "Step 26: BSNE_Loss=0.3468,local_loss: 0.032219389845652396,global_loss: 0.31454792618751526\n",
      "Step 27: BSNE_Loss=0.4083,local_loss: 0.026973064248103154,global_loss: 0.38134288787841797\n",
      "Step 28: BSNE_Loss=0.3383,local_loss: 0.028614000424738016,global_loss: 0.30969640612602234\n",
      "Step 29: BSNE_Loss=0.5162,local_loss: 0.02993591198639256,global_loss: 0.48628419637680054\n",
      "Step 30: BSNE_Loss=0.4307,local_loss: 0.03190995824832559,global_loss: 0.3988211750984192\n",
      "Step 31: BSNE_Loss=0.4501,local_loss: 0.03097636039430995,global_loss: 0.4191218316555023\n",
      "Step 32: BSNE_Loss=0.4124,local_loss: 0.029458444526621157,global_loss: 0.38292399048805237\n",
      "Step 33: BSNE_Loss=0.3236,local_loss: 0.035256926391768625,global_loss: 0.2883678078651428\n",
      "Step 34: BSNE_Loss=0.3726,local_loss: 0.03540162647752761,global_loss: 0.33723214268684387\n",
      "Step 35: BSNE_Loss=0.4807,local_loss: 0.033636534886299876,global_loss: 0.44702261686325073\n",
      "Step 36: BSNE_Loss=0.4379,local_loss: 0.03604163154967742,global_loss: 0.40190714597702026\n",
      "Step 37: BSNE_Loss=0.2208,local_loss: 0.031076311892398067,global_loss: 0.18975156545639038\n",
      "Step 38: BSNE_Loss=0.3222,local_loss: 0.02867638159386387,global_loss: 0.2935369908809662\n",
      "Step 39: BSNE_Loss=0.4863,local_loss: 0.03869257955086938,global_loss: 0.44765084981918335\n",
      "Step 40: BSNE_Loss=0.4403,local_loss: 0.02936981399949573,global_loss: 0.41089004278182983\n",
      "Step 41: BSNE_Loss=0.3053,local_loss: 0.03670342160962001,global_loss: 0.26860129833221436\n",
      "Step 42: BSNE_Loss=0.5971,local_loss: 0.030035291611221905,global_loss: 0.567039430141449\n",
      "Step 43: BSNE_Loss=0.4580,local_loss: 0.035016195356377566,global_loss: 0.42296847701072693\n",
      "Step 44: BSNE_Loss=0.2404,local_loss: 0.029503303020335825,global_loss: 0.21093390882015228\n",
      "Step 45: BSNE_Loss=0.1039,local_loss: 0.04400573289105726,global_loss: 0.059915732592344284\n",
      "Step 46: BSNE_Loss=0.3687,local_loss: 0.03297266790533138,global_loss: 0.33569297194480896\n",
      "Step 47: BSNE_Loss=0.4650,local_loss: 0.035424409089757566,global_loss: 0.42961058020591736\n",
      "Step 48: BSNE_Loss=0.2248,local_loss: 0.029092550628690213,global_loss: 0.19572104513645172\n",
      "Step 49: BSNE_Loss=0.4331,local_loss: 0.04093550758303842,global_loss: 0.3921363353729248\n",
      "Step 50: BSNE_Loss=0.2018,local_loss: 0.036337187922084846,global_loss: 0.16542640328407288\n",
      "Step 55: BSNE_Loss=0.2306,local_loss: 0.03780290311504787,global_loss: 0.19275589287281036\n",
      "Step 56: BSNE_Loss=0.2836,local_loss: 0.035569878873616276,global_loss: 0.24799701571464539\n",
      "Step 57: BSNE_Loss=0.3055,local_loss: 0.04553694385964667,global_loss: 0.259935200214386\n",
      "Step 58: BSNE_Loss=0.0882,local_loss: 0.02024487470136125,global_loss: 0.06799153983592987\n",
      "Step 59: BSNE_Loss=0.4520,local_loss: 0.03702764688727141,global_loss: 0.41502144932746887\n",
      "Step 60: BSNE_Loss=0.1895,local_loss: 0.036350621175534205,global_loss: 0.15311093628406525\n",
      "Step 61: BSNE_Loss=0.6220,local_loss: 0.030000973395239,global_loss: 0.5919957160949707\n",
      "Step 62: BSNE_Loss=0.5676,local_loss: 0.030464791133114583,global_loss: 0.537132740020752\n",
      "Step 63: BSNE_Loss=0.3846,local_loss: 0.043587668549547195,global_loss: 0.34103822708129883\n",
      "Step 64: BSNE_Loss=0.2716,local_loss: 0.03329194329062957,global_loss: 0.23827986419200897\n",
      "Step 65: BSNE_Loss=0.5145,local_loss: 0.030480047267206347,global_loss: 0.484027624130249\n",
      "Step 66: BSNE_Loss=0.2800,local_loss: 0.036179789808887086,global_loss: 0.24379989504814148\n",
      "Step 67: BSNE_Loss=0.5019,local_loss: 0.030532549316233122,global_loss: 0.4714145362377167\n",
      "Step 68: BSNE_Loss=0.3997,local_loss: 0.035174571632122134,global_loss: 0.36450886726379395\n",
      "Step 69: BSNE_Loss=0.2653,local_loss: 0.025048680539154267,global_loss: 0.24028007686138153\n",
      "Step 70: BSNE_Loss=0.4921,local_loss: 0.034816557847612874,global_loss: 0.4572703242301941\n",
      "Step 71: BSNE_Loss=0.4065,local_loss: 0.029061059595659668,global_loss: 0.3774232566356659\n",
      "Step 72: BSNE_Loss=0.3992,local_loss: 0.03138882128525981,global_loss: 0.3678598701953888\n",
      "Step 73: BSNE_Loss=0.4562,local_loss: 0.03610595552130957,global_loss: 0.42013049125671387\n",
      "Step 74: BSNE_Loss=0.4011,local_loss: 0.03671096485078887,global_loss: 0.3643653690814972\n",
      "Step 75: BSNE_Loss=0.3880,local_loss: 0.03013529950342596,global_loss: 0.35786861181259155\n",
      "Step 76: BSNE_Loss=0.2819,local_loss: 0.029245545090262844,global_loss: 0.25269386172294617\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 77: BSNE_Loss=0.3428,local_loss: 0.03164264440666845,global_loss: 0.31112805008888245\n",
      "Step 78: BSNE_Loss=0.4434,local_loss: 0.032288374971188714,global_loss: 0.4110621213912964\n",
      "Step 79: BSNE_Loss=0.4827,local_loss: 0.0379413039136414,global_loss: 0.444804847240448\n",
      "Step 80: BSNE_Loss=0.2861,local_loss: 0.04057591560991699,global_loss: 0.24549473822116852\n",
      "Step 81: BSNE_Loss=0.2849,local_loss: 0.03468785996266576,global_loss: 0.25016990303993225\n",
      "Step 82: BSNE_Loss=0.4729,local_loss: 0.03701308621129393,global_loss: 0.4359184503555298\n",
      "Step 83: BSNE_Loss=0.3466,local_loss: 0.03421029156987944,global_loss: 0.3123646676540375\n",
      "Step 84: BSNE_Loss=0.5528,local_loss: 0.029478201330698617,global_loss: 0.5233293771743774\n",
      "Step 85: BSNE_Loss=0.4761,local_loss: 0.03612984473931351,global_loss: 0.4399476945400238\n",
      "Step 86: BSNE_Loss=0.5419,local_loss: 0.03256672342012294,global_loss: 0.509296178817749\n",
      "Step 87: BSNE_Loss=0.4709,local_loss: 0.02934849837078403,global_loss: 0.4415206015110016\n",
      "Step 88: BSNE_Loss=0.4244,local_loss: 0.03138857039572357,global_loss: 0.3930528461933136\n",
      "Step 89: BSNE_Loss=0.3002,local_loss: 0.027765252812212884,global_loss: 0.2724635601043701\n",
      "Step 90: BSNE_Loss=0.2714,local_loss: 0.031366304583225285,global_loss: 0.24006903171539307\n",
      "Step 91: BSNE_Loss=0.4259,local_loss: 0.02903879670707941,global_loss: 0.39684098958969116\n",
      "Step 92: BSNE_Loss=0.1818,local_loss: 0.038328707739678766,global_loss: 0.1434570997953415\n",
      "Step 93: BSNE_Loss=0.5501,local_loss: 0.029407284344404178,global_loss: 0.5207149982452393\n",
      "Step 94: BSNE_Loss=0.3052,local_loss: 0.02222069435905535,global_loss: 0.2830143868923187\n",
      "Step 95: BSNE_Loss=0.3517,local_loss: 0.026927245552805703,global_loss: 0.32474833726882935\n",
      "Step 96: BSNE_Loss=0.3043,local_loss: 0.03056426686318138,global_loss: 0.2737453281879425\n",
      "Step 97: BSNE_Loss=0.2620,local_loss: 0.03728523276211675,global_loss: 0.22469112277030945\n",
      "Step 98: BSNE_Loss=0.3332,local_loss: 0.04438850572662175,global_loss: 0.2887882590293884\n",
      "Step 99: BSNE_Loss=0.5392,local_loss: 0.03653399183092297,global_loss: 0.5026630759239197\n",
      "Step 100: BSNE_Loss=0.4451,local_loss: 0.032165549220116094,global_loss: 0.412903368473053\n",
      "Step 101: BSNE_Loss=0.3138,local_loss: 0.04667246792586297,global_loss: 0.2671404182910919\n",
      "Step 102: BSNE_Loss=0.3946,local_loss: 0.030330249245611333,global_loss: 0.36423569917678833\n",
      "Step 103: BSNE_Loss=0.4207,local_loss: 0.03155544874852547,global_loss: 0.38912713527679443\n",
      "Step 104: BSNE_Loss=0.5266,local_loss: 0.030248251261039645,global_loss: 0.4963042140007019\n",
      "Step 105: BSNE_Loss=0.3316,local_loss: 0.03825968692598561,global_loss: 0.293304443359375\n",
      "Step 106: BSNE_Loss=0.0914,local_loss: 0.022807408066178526,global_loss: 0.06861235946416855\n",
      "Step 107: BSNE_Loss=0.3320,local_loss: 0.026887749309990695,global_loss: 0.30515697598457336\n",
      "Step 108: BSNE_Loss=0.5743,local_loss: 0.03325844039502791,global_loss: 0.5410093665122986\n",
      "Step 109: BSNE_Loss=0.4479,local_loss: 0.03154577735457556,global_loss: 0.41633257269859314\n",
      "Step 110: BSNE_Loss=0.4189,local_loss: 0.02970438827275347,global_loss: 0.38917455077171326\n",
      "Step 111: BSNE_Loss=0.3426,local_loss: 0.03319101513809165,global_loss: 0.3094150722026825\n",
      "Step 112: BSNE_Loss=0.6158,local_loss: 0.02831236859241447,global_loss: 0.5875148773193359\n",
      "Step 113: BSNE_Loss=0.3980,local_loss: 0.03582384973182873,global_loss: 0.36215561628341675\n",
      "Step 114: BSNE_Loss=0.2625,local_loss: 0.0366246463427051,global_loss: 0.22590044140815735\n",
      "Step 115: BSNE_Loss=0.5061,local_loss: 0.029978056720961887,global_loss: 0.47610992193222046\n",
      "Step 116: BSNE_Loss=0.4387,local_loss: 0.03577349708538162,global_loss: 0.40291228890419006\n",
      "Step 117: BSNE_Loss=0.5328,local_loss: 0.030641194706225547,global_loss: 0.5021417737007141\n",
      "Step 118: BSNE_Loss=0.3030,local_loss: 0.039996061934554114,global_loss: 0.2630336880683899\n",
      "Step 119: BSNE_Loss=0.4577,local_loss: 0.03810803540321728,global_loss: 0.4195803999900818\n",
      "Step 120: BSNE_Loss=0.3539,local_loss: 0.0308566164875087,global_loss: 0.3230462968349457\n",
      "Step 121: BSNE_Loss=0.3441,local_loss: 0.028648542564724114,global_loss: 0.3154819905757904\n",
      "Step 122: BSNE_Loss=0.3223,local_loss: 0.028226852837005642,global_loss: 0.2940484881401062\n",
      "Step 123: BSNE_Loss=0.5496,local_loss: 0.030176462504135526,global_loss: 0.519424557685852\n",
      "Step 124: BSNE_Loss=0.4171,local_loss: 0.026868525895772797,global_loss: 0.39018651843070984\n",
      "Step 125: BSNE_Loss=0.4239,local_loss: 0.030747793308460995,global_loss: 0.39317893981933594\n",
      "Step 126: BSNE_Loss=0.4585,local_loss: 0.028035164859238838,global_loss: 0.4304574429988861\n",
      "Step 127: BSNE_Loss=0.2845,local_loss: 0.034977811696862704,global_loss: 0.24957090616226196\n",
      "Step 128: BSNE_Loss=0.5907,local_loss: 0.027512570208048998,global_loss: 0.5631850957870483\n",
      "Step 129: BSNE_Loss=0.4019,local_loss: 0.029129934383641524,global_loss: 0.3727768063545227\n",
      "Step 130: BSNE_Loss=0.4448,local_loss: 0.030095032921829627,global_loss: 0.41467589139938354\n",
      "Step 131: BSNE_Loss=0.3731,local_loss: 0.028682120851639677,global_loss: 0.34439271688461304\n",
      "Step 132: BSNE_Loss=0.4560,local_loss: 0.03562505251487905,global_loss: 0.42039987444877625\n",
      "Step 133: BSNE_Loss=0.4421,local_loss: 0.02913165925659328,global_loss: 0.4129866659641266\n",
      "Step 134: BSNE_Loss=0.3562,local_loss: 0.03121303703143631,global_loss: 0.3250247538089752\n",
      "Step 135: BSNE_Loss=0.2895,local_loss: 0.03511912948471496,global_loss: 0.2543811798095703\n",
      "Step 136: BSNE_Loss=0.4082,local_loss: 0.03737286844302805,global_loss: 0.37079402804374695\n",
      "Step 137: BSNE_Loss=0.3883,local_loss: 0.030830847987541488,global_loss: 0.3575112521648407\n",
      "Step 138: BSNE_Loss=0.3800,local_loss: 0.028377666310689064,global_loss: 0.351601243019104\n",
      "Step 139: BSNE_Loss=0.3024,local_loss: 0.025930952063450897,global_loss: 0.27643027901649475\n",
      "Step 140: BSNE_Loss=0.3938,local_loss: 0.03167663498653106,global_loss: 0.3621087372303009\n",
      "Step 141: BSNE_Loss=0.5213,local_loss: 0.029654124941105465,global_loss: 0.4916642904281616\n",
      "Step 142: BSNE_Loss=0.2590,local_loss: 0.02693907648198578,global_loss: 0.23203468322753906\n",
      "Step 143: BSNE_Loss=0.4046,local_loss: 0.03193327805265953,global_loss: 0.37264034152030945\n",
      "Step 144: BSNE_Loss=0.4482,local_loss: 0.03054728270398241,global_loss: 0.41767942905426025\n",
      "Step 145: BSNE_Loss=0.4574,local_loss: 0.030656493434659725,global_loss: 0.42674311995506287\n",
      "Step 146: BSNE_Loss=0.3989,local_loss: 0.03031945217565735,global_loss: 0.3685365319252014\n",
      "Step 147: BSNE_Loss=0.5129,local_loss: 0.03292218746829792,global_loss: 0.47995346784591675\n",
      "Step 148: BSNE_Loss=0.1314,local_loss: 0.04098128203781082,global_loss: 0.09039092808961868\n",
      "Step 149: BSNE_Loss=0.2178,local_loss: 0.023088416530391526,global_loss: 0.19475626945495605\n",
      "Step 150: BSNE_Loss=0.4343,local_loss: 0.029511937766910322,global_loss: 0.4047434628009796\n",
      "Step 151: BSNE_Loss=0.4403,local_loss: 0.029977283147866577,global_loss: 0.4103526473045349\n",
      "Step 152: BSNE_Loss=0.4366,local_loss: 0.030201875686151028,global_loss: 0.4063907861709595\n",
      "Step 153: BSNE_Loss=0.3232,local_loss: 0.03171245149415704,global_loss: 0.2914706766605377\n",
      "Step 154: BSNE_Loss=0.4146,local_loss: 0.036363495963417446,global_loss: 0.37819311022758484\n",
      "Step 155: BSNE_Loss=0.2840,local_loss: 0.029390840030393058,global_loss: 0.25458192825317383\n",
      "Step 156: BSNE_Loss=0.3588,local_loss: 0.02802966474890942,global_loss: 0.3307619094848633\n",
      "Step 157: BSNE_Loss=0.2614,local_loss: 0.04627278643267241,global_loss: 0.2151278257369995\n",
      "Step 158: BSNE_Loss=0.4615,local_loss: 0.031731301286720394,global_loss: 0.4298000931739807\n",
      "Step 159: BSNE_Loss=0.3097,local_loss: 0.041847028556432686,global_loss: 0.2678576707839966\n",
      "Step 160: BSNE_Loss=0.4564,local_loss: 0.03669880708821896,global_loss: 0.41971439123153687\n",
      "Step 161: BSNE_Loss=0.4207,local_loss: 0.0336838912539033,global_loss: 0.3870176374912262\n",
      "Step 162: BSNE_Loss=0.5517,local_loss: 0.0334891367270842,global_loss: 0.5182532072067261\n",
      "Step 163: BSNE_Loss=0.4030,local_loss: 0.03792625422760927,global_loss: 0.3650766909122467\n",
      "Step 164: BSNE_Loss=0.4675,local_loss: 0.030227926146610063,global_loss: 0.4372650384902954\n",
      "Step 165: BSNE_Loss=0.3270,local_loss: 0.042047471663661004,global_loss: 0.28498461842536926\n",
      "Step 166: BSNE_Loss=0.4789,local_loss: 0.03346065829683047,global_loss: 0.44547706842422485\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 167: BSNE_Loss=0.4178,local_loss: 0.030064409733475052,global_loss: 0.3877216875553131\n",
      "Step 168: BSNE_Loss=0.6213,local_loss: 0.03035142259909894,global_loss: 0.5909385085105896\n",
      "Step 169: BSNE_Loss=0.4967,local_loss: 0.030779499608468833,global_loss: 0.4659401774406433\n",
      "Step 170: BSNE_Loss=0.4077,local_loss: 0.030137918723544557,global_loss: 0.37758105993270874\n",
      "Step 171: BSNE_Loss=0.4525,local_loss: 0.030463691651588336,global_loss: 0.4220397174358368\n",
      "Step 172: BSNE_Loss=0.3279,local_loss: 0.028296418725585824,global_loss: 0.29958921670913696\n",
      "Step 173: BSNE_Loss=0.3965,local_loss: 0.03133332010272141,global_loss: 0.3651377558708191\n",
      "Step 174: BSNE_Loss=0.4033,local_loss: 0.03221307291971671,global_loss: 0.3710624575614929\n",
      "Step 175: BSNE_Loss=0.4845,local_loss: 0.03325439594379813,global_loss: 0.45126259326934814\n",
      "Step 176: BSNE_Loss=0.3002,local_loss: 0.028033253888760372,global_loss: 0.27218836545944214\n",
      "Step 177: BSNE_Loss=0.2749,local_loss: 0.03247642717714921,global_loss: 0.2424503117799759\n",
      "Step 178: BSNE_Loss=0.3420,local_loss: 0.035893874301366605,global_loss: 0.3061009347438812\n",
      "Step 179: BSNE_Loss=0.2018,local_loss: 0.03751467676232204,global_loss: 0.16431036591529846\n",
      "Step 180: BSNE_Loss=0.2682,local_loss: 0.04120153375365554,global_loss: 0.22695782780647278\n",
      "Step 181: BSNE_Loss=0.4043,local_loss: 0.02745145977565694,global_loss: 0.37689289450645447\n",
      "Step 182: BSNE_Loss=0.4362,local_loss: 0.028867498767953737,global_loss: 0.4073019325733185\n",
      "Step 183: BSNE_Loss=0.2397,local_loss: 0.027218234688522205,global_loss: 0.21252630650997162\n",
      "Step 184: BSNE_Loss=0.3265,local_loss: 0.030391373113460112,global_loss: 0.2961495518684387\n",
      "Step 185: BSNE_Loss=0.5218,local_loss: 0.03617319018267138,global_loss: 0.48567286133766174\n",
      "Step 186: BSNE_Loss=0.3257,local_loss: 0.027797615492134334,global_loss: 0.29794424772262573\n",
      "Step 187: BSNE_Loss=0.1136,local_loss: 0.03599175486807395,global_loss: 0.07764442265033722\n",
      "Step 188: BSNE_Loss=0.3917,local_loss: 0.033316739412868404,global_loss: 0.3584093153476715\n",
      "Step 189: BSNE_Loss=0.3993,local_loss: 0.02921514251810476,global_loss: 0.370074599981308\n",
      "Step 190: BSNE_Loss=0.3512,local_loss: 0.0326816574550855,global_loss: 0.3184875547885895\n",
      "Step 191: BSNE_Loss=0.4381,local_loss: 0.031664997581698745,global_loss: 0.40648335218429565\n",
      "Step 192: BSNE_Loss=0.5647,local_loss: 0.03328048442029686,global_loss: 0.5314028263092041\n",
      "Step 193: BSNE_Loss=0.5974,local_loss: 0.03051405641995874,global_loss: 0.5669030547142029\n",
      "Step 194: BSNE_Loss=0.5172,local_loss: 0.03072957027224099,global_loss: 0.48651254177093506\n",
      "Step 195: BSNE_Loss=0.0686,local_loss: 0.04594212772756327,global_loss: 0.022686699405312538\n",
      "Step 196: BSNE_Loss=0.2956,local_loss: 0.02844264528434931,global_loss: 0.26720595359802246\n",
      "Step 197: BSNE_Loss=0.3954,local_loss: 0.0362617014697772,global_loss: 0.35912346839904785\n",
      "Step 198: BSNE_Loss=0.4190,local_loss: 0.03685899644450442,global_loss: 0.3820953667163849\n",
      "Step 199: BSNE_Loss=0.4613,local_loss: 0.03459199669965535,global_loss: 0.42675355076789856\n",
      "Center node 3745 finished at step 200 with loss 0.4613\n",
      "\n",
      "=== Pretraining Epoch 39 (Center Node: 9673) ===\n",
      "Step 0: BSNE_Loss=0.2058,local_loss: 0.0468717781726328,global_loss: 0.15889552235603333\n",
      "Step 1: BSNE_Loss=0.3475,local_loss: 0.029278722129489496,global_loss: 0.31818780303001404\n",
      "Step 2: BSNE_Loss=0.2193,local_loss: 0.05473296872580186,global_loss: 0.16458453238010406\n",
      "Step 3: BSNE_Loss=0.3700,local_loss: 0.04151673925199709,global_loss: 0.3285212814807892\n",
      "Step 4: BSNE_Loss=0.0869,local_loss: 0.050251184207631805,global_loss: 0.03667518496513367\n",
      "Step 5: BSNE_Loss=0.3305,local_loss: 0.02926892772146359,global_loss: 0.30124229192733765\n",
      "Step 6: BSNE_Loss=0.4102,local_loss: 0.042499992301441775,global_loss: 0.3677206337451935\n",
      "Step 7: BSNE_Loss=0.2822,local_loss: 0.039819159104053876,global_loss: 0.24234600365161896\n",
      "Step 8: BSNE_Loss=0.2599,local_loss: 0.0432841195744612,global_loss: 0.21659934520721436\n",
      "Step 9: BSNE_Loss=0.2008,local_loss: 0.033333500875611016,global_loss: 0.16742829978466034\n",
      "Step 10: BSNE_Loss=0.2954,local_loss: 0.05167139234970826,global_loss: 0.24370282888412476\n",
      "Step 11: BSNE_Loss=0.1512,local_loss: 0.03657403397044766,global_loss: 0.11466847360134125\n",
      "Step 12: BSNE_Loss=0.2990,local_loss: 0.025154252330701815,global_loss: 0.2738941013813019\n",
      "Step 13: BSNE_Loss=0.2901,local_loss: 0.047903706112136166,global_loss: 0.24216730892658234\n",
      "Step 14: BSNE_Loss=0.3707,local_loss: 0.03417385601637953,global_loss: 0.33649247884750366\n",
      "Step 15: BSNE_Loss=0.2186,local_loss: 0.05260276124629334,global_loss: 0.1660013198852539\n",
      "Step 16: BSNE_Loss=0.1173,local_loss: 0.05485616035073562,global_loss: 0.062432609498500824\n",
      "Step 17: BSNE_Loss=0.1722,local_loss: 0.037320932151816595,global_loss: 0.13484860956668854\n",
      "Step 18: BSNE_Loss=0.3804,local_loss: 0.0523619887252619,global_loss: 0.328071266412735\n",
      "Step 19: BSNE_Loss=0.2346,local_loss: 0.04120681527369406,global_loss: 0.1933654248714447\n",
      "Step 20: BSNE_Loss=0.0172,local_loss: 0.05366151563189234,global_loss: -0.036462265998125076\n",
      "Center node 9673 finished at step 21 with loss 0.0172\n",
      "\n",
      "=== Pretraining Epoch 40 (Center Node: 7864) ===\n",
      "Step 0: BSNE_Loss=0.2531,local_loss: 0.04255134215862605,global_loss: 0.2105000615119934\n",
      "Step 1: BSNE_Loss=0.2173,local_loss: 0.044840571174499896,global_loss: 0.17245440185070038\n",
      "Step 2: BSNE_Loss=0.3103,local_loss: 0.08280931593574091,global_loss: 0.22744138538837433\n",
      "Step 3: BSNE_Loss=0.1072,local_loss: 0.045876554343801654,global_loss: 0.06127571314573288\n",
      "Step 4: BSNE_Loss=0.3761,local_loss: 0.03369413262406328,global_loss: 0.34235844016075134\n",
      "Step 5: BSNE_Loss=0.3272,local_loss: 0.03034745927316967,global_loss: 0.2968158423900604\n",
      "Step 6: BSNE_Loss=0.2471,local_loss: 0.047540743730437734,global_loss: 0.19951559603214264\n",
      "Step 7: BSNE_Loss=0.3075,local_loss: 0.05348468992470923,global_loss: 0.2540591061115265\n",
      "Step 8: BSNE_Loss=0.2436,local_loss: 0.049192662071773294,global_loss: 0.19438356161117554\n",
      "Step 9: BSNE_Loss=-0.0115,local_loss: 0.06482176496210135,global_loss: -0.07632036507129669\n",
      "Center node 7864 finished at step 10 with loss -0.0115\n",
      "\n",
      "=== Pretraining Epoch 41 (Center Node: 8892) ===\n",
      "Step 0: BSNE_Loss=0.5007,local_loss: 0.01462520500666157,global_loss: 0.4860800802707672\n",
      "Step 1: BSNE_Loss=0.2019,local_loss: 0.03750015032573184,global_loss: 0.1643616408109665\n",
      "Step 2: BSNE_Loss=0.3632,local_loss: 0.036297543936407474,global_loss: 0.3268866240978241\n",
      "Step 3: BSNE_Loss=0.4576,local_loss: 0.02022342813067322,global_loss: 0.437392920255661\n",
      "Step 4: BSNE_Loss=0.3053,local_loss: 0.03352718064467164,global_loss: 0.2717340588569641\n",
      "Step 5: BSNE_Loss=0.4313,local_loss: 0.01464159840681267,global_loss: 0.416643351316452\n",
      "Step 6: BSNE_Loss=0.4879,local_loss: 0.030860115159002798,global_loss: 0.456993043422699\n",
      "Step 7: BSNE_Loss=0.5309,local_loss: 0.020081329897680432,global_loss: 0.5107991695404053\n",
      "Step 8: BSNE_Loss=0.1156,local_loss: 0.03714254652942419,global_loss: 0.07844579219818115\n",
      "Step 9: BSNE_Loss=0.1055,local_loss: 0.03698522529311536,global_loss: 0.06849002838134766\n",
      "Step 10: BSNE_Loss=0.1334,local_loss: 0.03167502284634838,global_loss: 0.10174732655286789\n",
      "Step 11: BSNE_Loss=0.4424,local_loss: 0.02918060917491038,global_loss: 0.41324448585510254\n",
      "Step 12: BSNE_Loss=0.4353,local_loss: 0.038096988785751885,global_loss: 0.3972443640232086\n",
      "Step 13: BSNE_Loss=0.2854,local_loss: 0.04854297350055726,global_loss: 0.23687531054019928\n",
      "Step 14: BSNE_Loss=0.1609,local_loss: 0.038136521723242466,global_loss: 0.12275055050849915\n",
      "Step 15: BSNE_Loss=0.1382,local_loss: 0.041448595324572554,global_loss: 0.09680093824863434\n",
      "Step 16: BSNE_Loss=0.4004,local_loss: 0.021294108432762072,global_loss: 0.379138320684433\n",
      "Step 17: BSNE_Loss=-0.0058,local_loss: 0.04900370525438129,global_loss: -0.05482996627688408\n",
      "Center node 8892 finished at step 18 with loss -0.0058\n",
      "\n",
      "=== Pretraining Epoch 42 (Center Node: 1576) ===\n",
      "Step 0: BSNE_Loss=0.2120,local_loss: 0.017068756451574036,global_loss: 0.19497288763523102\n",
      "Step 1: BSNE_Loss=0.1722,local_loss: 0.019947707560392488,global_loss: 0.152288019657135\n",
      "Step 2: BSNE_Loss=0.3194,local_loss: 0.022286725295347306,global_loss: 0.2970816493034363\n",
      "Step 3: BSNE_Loss=0.2695,local_loss: 0.027033079903973644,global_loss: 0.24247734248638153\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 4: BSNE_Loss=0.2353,local_loss: 0.021182124174895484,global_loss: 0.21412627398967743\n",
      "Step 5: BSNE_Loss=0.2062,local_loss: 0.018664801306179618,global_loss: 0.18755705654621124\n",
      "Step 6: BSNE_Loss=0.1642,local_loss: 0.011931888934890042,global_loss: 0.15230296552181244\n",
      "Step 7: BSNE_Loss=0.1094,local_loss: 0.007634586301573994,global_loss: 0.10171588510274887\n",
      "Step 8: BSNE_Loss=0.2898,local_loss: 0.022904819015834556,global_loss: 0.2668701708316803\n",
      "Step 9: BSNE_Loss=0.2591,local_loss: 0.018691319802501567,global_loss: 0.24043166637420654\n",
      "Step 10: BSNE_Loss=0.1218,local_loss: 0.009055001393817328,global_loss: 0.11272042989730835\n",
      "Step 11: BSNE_Loss=0.0968,local_loss: 0.014914633653089737,global_loss: 0.0819077119231224\n",
      "Step 12: BSNE_Loss=0.1824,local_loss: 0.014836456691023521,global_loss: 0.16757982969284058\n",
      "Step 13: BSNE_Loss=0.1481,local_loss: 0.026586022106030997,global_loss: 0.12148448824882507\n",
      "Step 14: BSNE_Loss=0.1736,local_loss: 0.021630792506076607,global_loss: 0.15193642675876617\n",
      "Step 15: BSNE_Loss=0.2918,local_loss: 0.03186440551958614,global_loss: 0.2599670886993408\n",
      "Step 16: BSNE_Loss=0.1415,local_loss: 0.02406166029922662,global_loss: 0.11738951504230499\n",
      "Step 17: BSNE_Loss=0.2092,local_loss: 0.02920285909613386,global_loss: 0.18000715970993042\n",
      "Step 18: BSNE_Loss=0.1398,local_loss: 0.021835647519877335,global_loss: 0.11800479143857956\n",
      "Step 19: BSNE_Loss=0.3740,local_loss: 0.044367782027212206,global_loss: 0.32966095209121704\n",
      "Step 20: BSNE_Loss=0.2573,local_loss: 0.0374311316849187,global_loss: 0.21982786059379578\n",
      "Step 21: BSNE_Loss=0.1502,local_loss: 0.013752081648341411,global_loss: 0.13641296327114105\n",
      "Step 22: BSNE_Loss=0.1709,local_loss: 0.01979459427647534,global_loss: 0.15110546350479126\n",
      "Step 23: BSNE_Loss=0.1557,local_loss: 0.020437410891893416,global_loss: 0.13530227541923523\n",
      "Step 24: BSNE_Loss=0.2434,local_loss: 0.025313456726721226,global_loss: 0.218126580119133\n",
      "Step 25: BSNE_Loss=0.1727,local_loss: 0.021471035097774954,global_loss: 0.15121829509735107\n",
      "Step 26: BSNE_Loss=0.1374,local_loss: 0.013882557505275481,global_loss: 0.12347296625375748\n",
      "Step 27: BSNE_Loss=0.1300,local_loss: 0.008619640526186708,global_loss: 0.12135884910821915\n",
      "Step 28: BSNE_Loss=0.0720,local_loss: 0.013010997858950305,global_loss: 0.05897553637623787\n",
      "Step 29: BSNE_Loss=0.2142,local_loss: 0.02051358451881194,global_loss: 0.19370335340499878\n",
      "Step 30: BSNE_Loss=0.1371,local_loss: 0.018167600581126764,global_loss: 0.1189514547586441\n",
      "Step 31: BSNE_Loss=0.2190,local_loss: 0.0188696643497379,global_loss: 0.20017537474632263\n",
      "Step 32: BSNE_Loss=0.1541,local_loss: 0.018493851672597282,global_loss: 0.1356416642665863\n",
      "Step 33: BSNE_Loss=0.1748,local_loss: 0.01390054346406314,global_loss: 0.160907581448555\n",
      "Step 34: BSNE_Loss=0.3364,local_loss: 0.02640393238081815,global_loss: 0.3099578619003296\n",
      "Step 35: BSNE_Loss=0.3253,local_loss: 0.026327572878756204,global_loss: 0.298992782831192\n",
      "Step 36: BSNE_Loss=0.0340,local_loss: 0.0021989994566425234,global_loss: 0.03182673081755638\n",
      "Center node 1576 finished at step 37 with loss 0.0340\n",
      "\n",
      "=== Pretraining Epoch 43 (Center Node: 536) ===\n",
      "Step 0: BSNE_Loss=0.1261,local_loss: 0.046447067942398446,global_loss: 0.079655222594738\n",
      "Step 1: BSNE_Loss=0.0257,local_loss: 0.061849886293464655,global_loss: -0.03613993525505066\n",
      "Center node 536 finished at step 2 with loss 0.0257\n",
      "\n",
      "=== Pretraining Epoch 44 (Center Node: 9609) ===\n",
      "Step 0: BSNE_Loss=0.1502,local_loss: 0.036797879380996615,global_loss: 0.11342426389455795\n",
      "Step 1: BSNE_Loss=0.5509,local_loss: 0.04220694086677182,global_loss: 0.5087350010871887\n",
      "Step 2: BSNE_Loss=0.4664,local_loss: 0.04102321238295034,global_loss: 0.42541590332984924\n",
      "Step 3: BSNE_Loss=0.2402,local_loss: 0.035456778001487915,global_loss: 0.2047189623117447\n",
      "Step 4: BSNE_Loss=-0.0043,local_loss: 0.04949813620193699,global_loss: -0.053814370185136795\n",
      "Center node 9609 finished at step 5 with loss -0.0043\n",
      "\n",
      "=== Pretraining Epoch 45 (Center Node: 1459) ===\n",
      "Step 0: BSNE_Loss=0.2220,local_loss: 0.04122828473083219,global_loss: 0.18082071840763092\n",
      "Step 1: BSNE_Loss=0.1097,local_loss: 0.07338635796092603,global_loss: 0.03630497306585312\n",
      "Step 2: BSNE_Loss=0.1311,local_loss: 0.04799920036172733,global_loss: 0.08305811136960983\n",
      "Step 3: BSNE_Loss=0.1732,local_loss: 0.07547722617297944,global_loss: 0.09773263335227966\n",
      "Step 4: BSNE_Loss=-0.0792,local_loss: 0.07236615681148137,global_loss: -0.1515372097492218\n",
      "Center node 1459 finished at step 5 with loss -0.0792\n",
      "\n",
      "=== Pretraining Epoch 46 (Center Node: 4284) ===\n",
      "Step 0: BSNE_Loss=0.2673,local_loss: 0.0593981982975425,global_loss: 0.20785410702228546\n",
      "Step 1: BSNE_Loss=0.3228,local_loss: 0.07760605745660183,global_loss: 0.24519813060760498\n",
      "Step 2: BSNE_Loss=0.4712,local_loss: 0.0721026530812536,global_loss: 0.39910000562667847\n",
      "Step 3: BSNE_Loss=0.2919,local_loss: 0.08387046156718451,global_loss: 0.2079811543226242\n",
      "Step 4: BSNE_Loss=0.0250,local_loss: 0.1041582729404271,global_loss: -0.07910898327827454\n",
      "Center node 4284 finished at step 5 with loss 0.0250\n",
      "\n",
      "=== Pretraining Epoch 47 (Center Node: 4460) ===\n",
      "Step 0: BSNE_Loss=-0.0303,local_loss: 0.09131189796115097,global_loss: -0.12158745527267456\n",
      "Center node 4460 finished at step 1 with loss -0.0303\n",
      "\n",
      "=== Pretraining Epoch 48 (Center Node: 10416) ===\n",
      "Step 0: BSNE_Loss=0.1164,local_loss: 0.03025686777048229,global_loss: 0.08613157272338867\n",
      "Step 1: BSNE_Loss=-0.0775,local_loss: 0.05416556599817022,global_loss: -0.13165175914764404\n",
      "Center node 10416 finished at step 2 with loss -0.0775\n",
      "\n",
      "=== Pretraining Epoch 49 (Center Node: 2910) ===\n",
      "Step 0: BSNE_Loss=0.2601,local_loss: 0.1011907731058796,global_loss: 0.15892988443374634\n",
      "Step 1: BSNE_Loss=0.1962,local_loss: 0.08073739228253284,global_loss: 0.11543668061494827\n",
      "Step 2: BSNE_Loss=0.0622,local_loss: 0.09249537235708119,global_loss: -0.030285725370049477\n",
      "Step 3: BSNE_Loss=0.3435,local_loss: 0.06027206248528667,global_loss: 0.28318357467651367\n",
      "Step 4: BSNE_Loss=0.3154,local_loss: 0.07472819278477047,global_loss: 0.24071328341960907\n",
      "Step 5: BSNE_Loss=0.1037,local_loss: 0.09448744650381292,global_loss: 0.009259259328246117\n",
      "Step 6: BSNE_Loss=0.1494,local_loss: 0.10456567607739013,global_loss: 0.044810496270656586\n",
      "Step 7: BSNE_Loss=0.2278,local_loss: 0.09761262079657002,global_loss: 0.13019929826259613\n",
      "Step 8: BSNE_Loss=0.4722,local_loss: 0.0812919998281553,global_loss: 0.39086994528770447\n",
      "Step 9: BSNE_Loss=0.5334,local_loss: 0.05478772189456665,global_loss: 0.4786083698272705\n",
      "Step 10: BSNE_Loss=0.3407,local_loss: 0.061658874673123545,global_loss: 0.27899569272994995\n",
      "Step 11: BSNE_Loss=0.4759,local_loss: 0.05909734570961526,global_loss: 0.4168502688407898\n",
      "Step 12: BSNE_Loss=0.0931,local_loss: 0.08099339829705507,global_loss: 0.012152205221354961\n",
      "Step 13: BSNE_Loss=0.4329,local_loss: 0.09781633568736459,global_loss: 0.3351149559020996\n",
      "Step 14: BSNE_Loss=0.4175,local_loss: 0.06151995534614047,global_loss: 0.3559969961643219\n",
      "Step 15: BSNE_Loss=0.8696,local_loss: 0.045200918910128146,global_loss: 0.824448823928833\n",
      "Step 16: BSNE_Loss=0.8248,local_loss: 0.04631235354183291,global_loss: 0.7784678936004639\n",
      "Step 17: BSNE_Loss=0.2534,local_loss: 0.074683123917189,global_loss: 0.17876632511615753\n",
      "Step 18: BSNE_Loss=0.4332,local_loss: 0.05215326992142564,global_loss: 0.38106223940849304\n",
      "Step 19: BSNE_Loss=0.4135,local_loss: 0.09469149729807566,global_loss: 0.3188106119632721\n",
      "Step 20: BSNE_Loss=0.2365,local_loss: 0.08783455911701615,global_loss: 0.14864923059940338\n",
      "Step 21: BSNE_Loss=0.2323,local_loss: 0.07918018078601226,global_loss: 0.15313641726970673\n",
      "Step 22: BSNE_Loss=0.3398,local_loss: 0.062292592238193334,global_loss: 0.27747154235839844\n",
      "Step 23: BSNE_Loss=0.1894,local_loss: 0.07640007754532113,global_loss: 0.11304329335689545\n",
      "Step 24: BSNE_Loss=0.0762,local_loss: 0.07476495084879638,global_loss: 0.001438060076907277\n",
      "Step 25: BSNE_Loss=0.1376,local_loss: 0.04351293573701426,global_loss: 0.094068244099617\n",
      "Step 26: BSNE_Loss=0.0906,local_loss: 0.06357455047243518,global_loss: 0.027060015127062798\n",
      "Step 27: BSNE_Loss=0.1843,local_loss: 0.07774272239261162,global_loss: 0.10657411813735962\n",
      "Step 28: BSNE_Loss=0.2268,local_loss: 0.07575923894094119,global_loss: 0.15106230974197388\n",
      "Step 29: BSNE_Loss=0.4106,local_loss: 0.07043169859059081,global_loss: 0.34013375639915466\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 30: BSNE_Loss=0.2510,local_loss: 0.09056050099289373,global_loss: 0.1604500412940979\n",
      "Step 31: BSNE_Loss=0.2909,local_loss: 0.09641232213811218,global_loss: 0.1945163607597351\n",
      "Step 32: BSNE_Loss=0.2451,local_loss: 0.06672544909081522,global_loss: 0.17835001647472382\n",
      "Step 33: BSNE_Loss=0.3160,local_loss: 0.059714909615771125,global_loss: 0.25630539655685425\n",
      "Step 34: BSNE_Loss=0.2454,local_loss: 0.05810050203400728,global_loss: 0.18733586370944977\n",
      "Step 35: BSNE_Loss=0.0731,local_loss: 0.09929571099652677,global_loss: -0.026214517652988434\n",
      "Step 36: BSNE_Loss=0.2479,local_loss: 0.06778462759284741,global_loss: 0.1801137924194336\n",
      "Step 37: BSNE_Loss=0.0405,local_loss: 0.08447284690627216,global_loss: -0.04399602860212326\n",
      "Center node 2910 finished at step 38 with loss 0.0405\n",
      "\n",
      "=== Pretraining Epoch 50 (Center Node: 6907) ===\n",
      "Step 0: BSNE_Loss=0.1984,local_loss: 0.09767334471663683,global_loss: 0.10069617629051208\n",
      "Step 1: BSNE_Loss=0.1389,local_loss: 0.09660677376475775,global_loss: 0.04224413260817528\n",
      "Step 2: BSNE_Loss=0.0141,local_loss: 0.11892581675021888,global_loss: -0.10480286180973053\n",
      "Center node 6907 finished at step 3 with loss 0.0141\n",
      "\n",
      "=== Pretraining Epoch 51 (Center Node: 2172) ===\n",
      "Step 0: BSNE_Loss=0.2326,local_loss: 0.04567035627382729,global_loss: 0.18689076602458954\n",
      "Step 1: BSNE_Loss=0.0154,local_loss: 0.0253799739997218,global_loss: -0.009945753030478954\n",
      "Center node 2172 finished at step 2 with loss 0.0154\n",
      "\n",
      "=== Pretraining Epoch 52 (Center Node: 4403) ===\n",
      "Step 0: BSNE_Loss=0.2039,local_loss: 0.040830441901568315,global_loss: 0.1630573868751526\n",
      "Step 1: BSNE_Loss=0.3499,local_loss: 0.06097776219074667,global_loss: 0.2889353334903717\n",
      "Step 2: BSNE_Loss=-0.1144,local_loss: 0.04402906888029824,global_loss: -0.15839751064777374\n",
      "Center node 4403 finished at step 3 with loss -0.1144\n",
      "\n",
      "=== Pretraining Epoch 53 (Center Node: 1106) ===\n",
      "Step 0: BSNE_Loss=0.6888,local_loss: 0.04711337319955105,global_loss: 0.6416497230529785\n",
      "Step 1: BSNE_Loss=0.2735,local_loss: 0.05604488288982404,global_loss: 0.21749810874462128\n",
      "Step 2: BSNE_Loss=0.3389,local_loss: 0.028511879488956556,global_loss: 0.31040728092193604\n",
      "Step 3: BSNE_Loss=0.2567,local_loss: 0.06166641077511063,global_loss: 0.19499926269054413\n",
      "Step 4: BSNE_Loss=0.1543,local_loss: 0.04244568664347188,global_loss: 0.11184439063072205\n",
      "Step 5: BSNE_Loss=-0.2191,local_loss: 0.03765761673509534,global_loss: -0.25678619742393494\n",
      "Center node 1106 finished at step 6 with loss -0.2191\n",
      "\n",
      "=== Pretraining Epoch 54 (Center Node: 7512) ===\n",
      "Step 0: BSNE_Loss=-0.0053,local_loss: 0.04118612732829606,global_loss: -0.04652140662074089\n",
      "Center node 7512 finished at step 1 with loss -0.0053\n",
      "\n",
      "=== Pretraining Epoch 55 (Center Node: 5661) ===\n",
      "Step 0: BSNE_Loss=0.5169,local_loss: 0.023024966082279573,global_loss: 0.4938667416572571\n",
      "Step 1: BSNE_Loss=0.6027,local_loss: 0.039550970669252755,global_loss: 0.5631276369094849\n",
      "Step 2: BSNE_Loss=0.6201,local_loss: 0.03608571059259644,global_loss: 0.584054172039032\n",
      "Step 3: BSNE_Loss=0.5989,local_loss: 0.03749986575758461,global_loss: 0.5614222288131714\n",
      "Step 4: BSNE_Loss=0.5168,local_loss: 0.06270138381846853,global_loss: 0.4540524482727051\n",
      "Step 5: BSNE_Loss=0.4733,local_loss: 0.03901117288473933,global_loss: 0.43428197503089905\n",
      "Step 6: BSNE_Loss=0.8048,local_loss: 0.029838844379150485,global_loss: 0.7749522924423218\n",
      "Step 7: BSNE_Loss=0.3955,local_loss: 0.027884920657184877,global_loss: 0.367567777633667\n",
      "Step 8: BSNE_Loss=0.6822,local_loss: 0.029781621166991228,global_loss: 0.6524322032928467\n",
      "Step 9: BSNE_Loss=0.5965,local_loss: 0.027534520193364994,global_loss: 0.5689206719398499\n",
      "Step 10: BSNE_Loss=0.5376,local_loss: 0.027501895520750892,global_loss: 0.5101267099380493\n",
      "Step 11: BSNE_Loss=0.5054,local_loss: 0.020790507999081186,global_loss: 0.4846426844596863\n",
      "Step 12: BSNE_Loss=0.5051,local_loss: 0.02745651669352363,global_loss: 0.4776724576950073\n",
      "Step 13: BSNE_Loss=0.5733,local_loss: 0.02910476024884987,global_loss: 0.5441614985466003\n",
      "Step 14: BSNE_Loss=0.5208,local_loss: 0.03289030609211489,global_loss: 0.4879387617111206\n",
      "Step 15: BSNE_Loss=0.4372,local_loss: 0.04562538603036663,global_loss: 0.3916078507900238\n",
      "Step 16: BSNE_Loss=0.3554,local_loss: 0.052888928937377434,global_loss: 0.30252939462661743\n",
      "Step 17: BSNE_Loss=0.3781,local_loss: 0.04891772909323448,global_loss: 0.32916051149368286\n",
      "Step 18: BSNE_Loss=0.5677,local_loss: 0.042472094640649855,global_loss: 0.5252299308776855\n",
      "Step 19: BSNE_Loss=0.5408,local_loss: 0.052697617645936626,global_loss: 0.48810723423957825\n",
      "Step 20: BSNE_Loss=0.2301,local_loss: 0.0479535793082857,global_loss: 0.1821507215499878\n",
      "Step 21: BSNE_Loss=0.2472,local_loss: 0.051231357770673884,global_loss: 0.1959664523601532\n",
      "Step 22: BSNE_Loss=0.5006,local_loss: 0.048514041543887026,global_loss: 0.4521193504333496\n",
      "Step 23: BSNE_Loss=0.4477,local_loss: 0.059661222917917825,global_loss: 0.3880569338798523\n",
      "Step 24: BSNE_Loss=0.1886,local_loss: 0.07444977514773413,global_loss: 0.11411122977733612\n",
      "Step 25: BSNE_Loss=0.5079,local_loss: 0.04884626113061884,global_loss: 0.45908600091934204\n",
      "Step 26: BSNE_Loss=0.3569,local_loss: 0.044083673078444595,global_loss: 0.31284159421920776\n",
      "Step 27: BSNE_Loss=0.5460,local_loss: 0.04457300366485163,global_loss: 0.5014360547065735\n",
      "Step 28: BSNE_Loss=0.7004,local_loss: 0.03091859388637795,global_loss: 0.6694674491882324\n",
      "Step 29: BSNE_Loss=0.1123,local_loss: 0.05918012108160338,global_loss: 0.053120627999305725\n",
      "Step 30: BSNE_Loss=0.3951,local_loss: 0.05064137878750793,global_loss: 0.34445691108703613\n",
      "Step 31: BSNE_Loss=0.4817,local_loss: 0.043392507219992986,global_loss: 0.4383029639720917\n",
      "Step 32: BSNE_Loss=0.1190,local_loss: 0.06554887933585278,global_loss: 0.0534873828291893\n",
      "Step 33: BSNE_Loss=0.0384,local_loss: 0.05966515843839295,global_loss: -0.02121690660715103\n",
      "Center node 5661 finished at step 34 with loss 0.0384\n",
      "\n",
      "=== Pretraining Epoch 56 (Center Node: 714) ===\n",
      "Step 0: BSNE_Loss=0.1393,local_loss: 0.01786424694765822,global_loss: 0.12140055000782013\n",
      "Step 1: BSNE_Loss=-0.0829,local_loss: 0.023443138225718613,global_loss: -0.10638189315795898\n",
      "Center node 714 finished at step 2 with loss -0.0829\n",
      "\n",
      "=== Pretraining Epoch 57 (Center Node: 6184) ===\n",
      "Step 0: BSNE_Loss=0.4549,local_loss: 0.009465290399120863,global_loss: 0.44547462463378906\n",
      "Step 1: BSNE_Loss=0.4734,local_loss: 0.014801596941967716,global_loss: 0.4586057662963867\n",
      "Step 2: BSNE_Loss=0.4670,local_loss: 0.015291677152508234,global_loss: 0.45170414447784424\n",
      "Step 3: BSNE_Loss=0.4097,local_loss: 0.020473747101183576,global_loss: 0.3892604112625122\n",
      "Step 4: BSNE_Loss=0.4969,local_loss: 0.008694071986236564,global_loss: 0.4882415533065796\n",
      "Step 5: BSNE_Loss=0.4790,local_loss: 0.009284617572652875,global_loss: 0.4696747064590454\n",
      "Step 6: BSNE_Loss=0.4317,local_loss: 0.012775626377687059,global_loss: 0.41888245940208435\n",
      "Step 7: BSNE_Loss=0.4715,local_loss: 0.01113073310112136,global_loss: 0.4604140520095825\n",
      "Step 8: BSNE_Loss=0.4841,local_loss: 0.009607956041937503,global_loss: 0.47449517250061035\n",
      "Step 9: BSNE_Loss=0.3874,local_loss: 0.0157366036850238,global_loss: 0.37168124318122864\n",
      "Step 10: BSNE_Loss=0.4769,local_loss: 0.011034976749496469,global_loss: 0.4659120738506317\n",
      "Step 11: BSNE_Loss=0.3883,local_loss: 0.018841202821548126,global_loss: 0.3694281280040741\n",
      "Step 12: BSNE_Loss=0.3493,local_loss: 0.019015929040586983,global_loss: 0.33031660318374634\n",
      "Step 13: BSNE_Loss=0.5073,local_loss: 0.011300733995575,global_loss: 0.4959496855735779\n",
      "Step 14: BSNE_Loss=0.3256,local_loss: 0.021116815060135817,global_loss: 0.30450600385665894\n",
      "Step 15: BSNE_Loss=0.5026,local_loss: 0.01453299594849467,global_loss: 0.4880579710006714\n",
      "Step 16: BSNE_Loss=0.2218,local_loss: 0.03250862358159899,global_loss: 0.18933604657649994\n",
      "Step 17: BSNE_Loss=0.4045,local_loss: 0.016668834051551652,global_loss: 0.38782989978790283\n",
      "Step 18: BSNE_Loss=0.4166,local_loss: 0.025090379383122717,global_loss: 0.3915449380874634\n",
      "Step 19: BSNE_Loss=0.4675,local_loss: 0.015858049538617816,global_loss: 0.451654314994812\n",
      "Step 20: BSNE_Loss=0.4072,local_loss: 0.014612080574258154,global_loss: 0.39260560274124146\n",
      "Step 21: BSNE_Loss=0.1147,local_loss: 0.03947584657104339,global_loss: 0.07522109895944595\n",
      "Step 22: BSNE_Loss=0.5001,local_loss: 0.013735164581887786,global_loss: 0.4864041805267334\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 23: BSNE_Loss=0.3823,local_loss: 0.020977225576095854,global_loss: 0.36135759949684143\n",
      "Step 24: BSNE_Loss=0.2254,local_loss: 0.03063991803730025,global_loss: 0.19475862383842468\n",
      "Step 25: BSNE_Loss=0.2817,local_loss: 0.023202589347149444,global_loss: 0.2584846019744873\n",
      "Step 26: BSNE_Loss=0.2799,local_loss: 0.02013550047651759,global_loss: 0.2597289979457855\n",
      "Step 27: BSNE_Loss=0.4420,local_loss: 0.022225899225256654,global_loss: 0.41975873708724976\n",
      "Step 28: BSNE_Loss=0.3114,local_loss: 0.018826500104819077,global_loss: 0.29257622361183167\n",
      "Step 29: BSNE_Loss=0.3702,local_loss: 0.02785234458462905,global_loss: 0.3422990143299103\n",
      "Step 30: BSNE_Loss=0.3819,local_loss: 0.017692806369422998,global_loss: 0.3642418086528778\n",
      "Step 31: BSNE_Loss=0.3680,local_loss: 0.014917195410619324,global_loss: 0.35311341285705566\n",
      "Step 32: BSNE_Loss=0.3892,local_loss: 0.025501888087784384,global_loss: 0.3636969029903412\n",
      "Step 33: BSNE_Loss=0.0919,local_loss: 0.03549493641761747,global_loss: 0.0564245842397213\n",
      "Step 34: BSNE_Loss=0.1272,local_loss: 0.038850381766287995,global_loss: 0.0883670300245285\n",
      "Step 35: BSNE_Loss=0.3843,local_loss: 0.017373121478557026,global_loss: 0.3668968081474304\n",
      "Step 36: BSNE_Loss=0.3201,local_loss: 0.023992806429161594,global_loss: 0.29609936475753784\n",
      "Step 37: BSNE_Loss=0.2949,local_loss: 0.03831335951778938,global_loss: 0.25656357407569885\n",
      "Step 38: BSNE_Loss=0.4486,local_loss: 0.021498792133087186,global_loss: 0.42714205384254456\n",
      "Step 39: BSNE_Loss=0.5108,local_loss: 0.018883877228106385,global_loss: 0.4919266700744629\n",
      "Step 40: BSNE_Loss=0.1028,local_loss: 0.04213531832010815,global_loss: 0.060624171048402786\n",
      "Step 41: BSNE_Loss=0.3227,local_loss: 0.026171371915112665,global_loss: 0.2964940071105957\n",
      "Step 42: BSNE_Loss=0.1645,local_loss: 0.027669145168495543,global_loss: 0.13687558472156525\n",
      "Step 43: BSNE_Loss=0.3055,local_loss: 0.02223824630646193,global_loss: 0.2832197844982147\n",
      "Step 44: BSNE_Loss=0.3217,local_loss: 0.01631779657900617,global_loss: 0.3053339123725891\n",
      "Step 45: BSNE_Loss=0.2044,local_loss: 0.03189694654178251,global_loss: 0.1724909245967865\n",
      "Step 46: BSNE_Loss=0.1361,local_loss: 0.028221359940683786,global_loss: 0.10791858285665512\n",
      "Step 47: BSNE_Loss=0.4133,local_loss: 0.03237857097063674,global_loss: 0.38093411922454834\n",
      "Step 48: BSNE_Loss=0.2902,local_loss: 0.028943188504569856,global_loss: 0.261215478181839\n",
      "Step 49: BSNE_Loss=0.3337,local_loss: 0.031586080769969896,global_loss: 0.30209314823150635\n",
      "Step 50: BSNE_Loss=0.4173,local_loss: 0.023166139842679855,global_loss: 0.3940838873386383\n",
      "Step 51: BSNE_Loss=0.2370,local_loss: 0.03682768039268375,global_loss: 0.20013049244880676\n",
      "Step 52: BSNE_Loss=0.1453,local_loss: 0.039727057105698335,global_loss: 0.10556159913539886\n",
      "Step 53: BSNE_Loss=0.2102,local_loss: 0.039189288932338286,global_loss: 0.17101459205150604\n",
      "Step 54: BSNE_Loss=0.1591,local_loss: 0.03808682039438027,global_loss: 0.12100423127412796\n",
      "Step 55: BSNE_Loss=0.4735,local_loss: 0.022571745961577405,global_loss: 0.45096462965011597\n",
      "Step 56: BSNE_Loss=0.3271,local_loss: 0.021589413682209964,global_loss: 0.30552801489830017\n",
      "Step 57: BSNE_Loss=0.5310,local_loss: 0.034394050229302836,global_loss: 0.4965739846229553\n",
      "Step 58: BSNE_Loss=0.4067,local_loss: 0.025174369679506838,global_loss: 0.38148146867752075\n",
      "Step 59: BSNE_Loss=0.3931,local_loss: 0.02055298700513907,global_loss: 0.3725651800632477\n",
      "Step 60: BSNE_Loss=0.3672,local_loss: 0.018500726623606155,global_loss: 0.3486793637275696\n",
      "Step 61: BSNE_Loss=0.4279,local_loss: 0.021100623091513473,global_loss: 0.4067947566509247\n",
      "Step 62: BSNE_Loss=0.2794,local_loss: 0.02530094611353324,global_loss: 0.254145085811615\n",
      "Step 63: BSNE_Loss=0.1721,local_loss: 0.034518442155279404,global_loss: 0.13754133880138397\n",
      "Step 64: BSNE_Loss=0.3693,local_loss: 0.02267371725058145,global_loss: 0.346648246049881\n",
      "Step 65: BSNE_Loss=0.3809,local_loss: 0.025199422270778732,global_loss: 0.3556937277317047\n",
      "Step 66: BSNE_Loss=0.3173,local_loss: 0.01940262118759123,global_loss: 0.29790785908699036\n",
      "Step 67: BSNE_Loss=0.3857,local_loss: 0.029340140651084107,global_loss: 0.35633495450019836\n",
      "Step 68: BSNE_Loss=0.4344,local_loss: 0.01882253837267058,global_loss: 0.4155900180339813\n",
      "Step 69: BSNE_Loss=0.3373,local_loss: 0.025905927126930094,global_loss: 0.3114060163497925\n",
      "Step 70: BSNE_Loss=0.4743,local_loss: 0.0335914603478434,global_loss: 0.4407339096069336\n",
      "Step 71: BSNE_Loss=0.4110,local_loss: 0.026886738641426065,global_loss: 0.38411298394203186\n",
      "Step 72: BSNE_Loss=0.3743,local_loss: 0.01718871353247102,global_loss: 0.357128381729126\n",
      "Step 73: BSNE_Loss=0.0944,local_loss: 0.03153430562358002,global_loss: 0.0628846287727356\n",
      "Step 74: BSNE_Loss=0.1021,local_loss: 0.03256293866025184,global_loss: 0.06953020393848419\n",
      "Step 75: BSNE_Loss=0.1596,local_loss: 0.03243325659828781,global_loss: 0.12718231976032257\n",
      "Step 76: BSNE_Loss=0.6023,local_loss: 0.02903543429629751,global_loss: 0.5732454061508179\n",
      "Step 77: BSNE_Loss=-0.0644,local_loss: 0.03885895879054478,global_loss: -0.10329736769199371\n",
      "Center node 6184 finished at step 78 with loss -0.0644\n",
      "\n",
      "=== Pretraining Epoch 58 (Center Node: 4418) ===\n",
      "Step 0: BSNE_Loss=-0.0907,local_loss: 0.015465665858400802,global_loss: -0.10621470957994461\n",
      "Center node 4418 finished at step 1 with loss -0.0907\n",
      "\n",
      "=== Pretraining Epoch 59 (Center Node: 3623) ===\n",
      "Step 0: BSNE_Loss=0.4414,local_loss: 0.035473196240917354,global_loss: 0.4059429466724396\n",
      "Step 1: BSNE_Loss=0.3786,local_loss: 0.03201920967073879,global_loss: 0.34659937024116516\n",
      "Step 2: BSNE_Loss=0.5526,local_loss: 0.04652853802596571,global_loss: 0.506057858467102\n",
      "Step 3: BSNE_Loss=0.6746,local_loss: 0.03479973459312978,global_loss: 0.6398041844367981\n",
      "Step 4: BSNE_Loss=0.2166,local_loss: 0.03552196433479803,global_loss: 0.18106026947498322\n",
      "Step 5: BSNE_Loss=0.2688,local_loss: 0.05206932852130689,global_loss: 0.21674399077892303\n",
      "Step 6: BSNE_Loss=0.5044,local_loss: 0.029457222021563884,global_loss: 0.4748966693878174\n",
      "Step 7: BSNE_Loss=0.5494,local_loss: 0.03239826473648545,global_loss: 0.5170099139213562\n",
      "Step 8: BSNE_Loss=0.6136,local_loss: 0.030757403746651434,global_loss: 0.5828163623809814\n",
      "Step 9: BSNE_Loss=0.3175,local_loss: 0.0399903016313613,global_loss: 0.2775365710258484\n",
      "Step 10: BSNE_Loss=0.2198,local_loss: 0.04515842383113315,global_loss: 0.17463582754135132\n",
      "Step 11: BSNE_Loss=0.0110,local_loss: 0.04256980979522287,global_loss: -0.0315876342356205\n",
      "Center node 3623 finished at step 12 with loss 0.0110\n",
      "\n",
      "=== Pretraining Epoch 60 (Center Node: 190) ===\n",
      "Step 0: BSNE_Loss=0.1119,local_loss: 0.03591148605147914,global_loss: 0.07601965963840485\n",
      "Step 1: BSNE_Loss=-0.1365,local_loss: 0.03965496824960944,global_loss: -0.17613938450813293\n",
      "Center node 190 finished at step 2 with loss -0.1365\n",
      "\n",
      "=== Pretraining Epoch 61 (Center Node: 11279) ===\n",
      "Step 0: BSNE_Loss=0.2373,local_loss: 0.02082943987951298,global_loss: 0.21651092171669006\n",
      "Step 1: BSNE_Loss=0.2566,local_loss: 0.03326253001655717,global_loss: 0.2233695536851883\n",
      "Step 2: BSNE_Loss=0.3050,local_loss: 0.04015736073836925,global_loss: 0.26483988761901855\n",
      "Step 3: BSNE_Loss=-0.0103,local_loss: 0.0544306890213289,global_loss: -0.06469786167144775\n",
      "Center node 11279 finished at step 4 with loss -0.0103\n",
      "\n",
      "=== Pretraining Epoch 62 (Center Node: 8193) ===\n",
      "Step 0: BSNE_Loss=0.4117,local_loss: 0.05413609694762306,global_loss: 0.35759586095809937\n",
      "Step 1: BSNE_Loss=0.4119,local_loss: 0.02631246735733734,global_loss: 0.38559889793395996\n",
      "Step 2: BSNE_Loss=0.4794,local_loss: 0.045646926550328315,global_loss: 0.43371227383613586\n",
      "Step 3: BSNE_Loss=0.5169,local_loss: 0.04308583123167324,global_loss: 0.4738089442253113\n",
      "Step 4: BSNE_Loss=0.4735,local_loss: 0.050134339917231624,global_loss: 0.42335596680641174\n",
      "Step 5: BSNE_Loss=0.6466,local_loss: 0.04070875711289038,global_loss: 0.6059054732322693\n",
      "Step 6: BSNE_Loss=0.4814,local_loss: 0.05429622467484003,global_loss: 0.4270620346069336\n",
      "Step 7: BSNE_Loss=0.3602,local_loss: 0.04868396250426693,global_loss: 0.31155791878700256\n",
      "Step 8: BSNE_Loss=0.3569,local_loss: 0.046873552530221865,global_loss: 0.31001967191696167\n",
      "Step 9: BSNE_Loss=0.1877,local_loss: 0.057780637884104646,global_loss: 0.12993751466274261\n",
      "Step 10: BSNE_Loss=0.2957,local_loss: 0.04733979872511471,global_loss: 0.24835631251335144\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 11: BSNE_Loss=0.1994,local_loss: 0.05620563013786105,global_loss: 0.1431458741426468\n",
      "Step 12: BSNE_Loss=0.3016,local_loss: 0.052391596543115614,global_loss: 0.24921715259552002\n",
      "Step 13: BSNE_Loss=0.1692,local_loss: 0.07284395404746169,global_loss: 0.09636703133583069\n",
      "Step 14: BSNE_Loss=0.5107,local_loss: 0.047129648616085575,global_loss: 0.4635857343673706\n",
      "Step 15: BSNE_Loss=0.3601,local_loss: 0.044369586830857496,global_loss: 0.3157017230987549\n",
      "Step 16: BSNE_Loss=0.2331,local_loss: 0.05488433306791486,global_loss: 0.17819520831108093\n",
      "Step 17: BSNE_Loss=0.3943,local_loss: 0.03748892639835193,global_loss: 0.3567765951156616\n",
      "Step 18: BSNE_Loss=0.2384,local_loss: 0.07295640822655039,global_loss: 0.16541659832000732\n",
      "Step 19: BSNE_Loss=0.3161,local_loss: 0.05055820644840492,global_loss: 0.2655070424079895\n",
      "Step 20: BSNE_Loss=0.6084,local_loss: 0.05197453560743956,global_loss: 0.5563933849334717\n",
      "Step 21: BSNE_Loss=0.6518,local_loss: 0.04803092359724383,global_loss: 0.6037394404411316\n",
      "Step 22: BSNE_Loss=0.2367,local_loss: 0.057719548176360144,global_loss: 0.1789628118276596\n",
      "Step 23: BSNE_Loss=0.6121,local_loss: 0.04857294451872843,global_loss: 0.5634832978248596\n",
      "Step 24: BSNE_Loss=0.2093,local_loss: 0.06328906545701962,global_loss: 0.14600709080696106\n",
      "Step 25: BSNE_Loss=0.2666,local_loss: 0.06876124840543921,global_loss: 0.19781449437141418\n",
      "Step 26: BSNE_Loss=0.0320,local_loss: 0.09329542935492244,global_loss: -0.06129170581698418\n",
      "Center node 8193 finished at step 27 with loss 0.0320\n",
      "\n",
      "=== Pretraining Epoch 63 (Center Node: 133) ===\n",
      "Step 0: BSNE_Loss=0.4892,local_loss: 0.03712275973115888,global_loss: 0.45209911465644836\n",
      "Step 1: BSNE_Loss=0.3929,local_loss: 0.03581465505707646,global_loss: 0.35704198479652405\n",
      "Step 2: BSNE_Loss=-0.1538,local_loss: 0.0512718857526648,global_loss: -0.20504726469516754\n",
      "Center node 133 finished at step 3 with loss -0.1538\n",
      "\n",
      "=== Pretraining Epoch 64 (Center Node: 6983) ===\n",
      "Step 0: BSNE_Loss=0.5982,local_loss: 0.03460090519959831,global_loss: 0.563592791557312\n",
      "Step 1: BSNE_Loss=0.3180,local_loss: 0.038810013324964504,global_loss: 0.27916648983955383\n",
      "Step 2: BSNE_Loss=0.0179,local_loss: 0.037019045762112714,global_loss: -0.019161242991685867\n",
      "Center node 6983 finished at step 3 with loss 0.0179\n",
      "\n",
      "=== Pretraining Epoch 65 (Center Node: 8111) ===\n",
      "Step 0: BSNE_Loss=0.3900,local_loss: 0.05779259083475866,global_loss: 0.3322029113769531\n",
      "Step 1: BSNE_Loss=0.6469,local_loss: 0.06250606170063798,global_loss: 0.5844388008117676\n",
      "Step 2: BSNE_Loss=0.3400,local_loss: 0.04887891922628433,global_loss: 0.29109135270118713\n",
      "Step 3: BSNE_Loss=0.4979,local_loss: 0.06101419754356906,global_loss: 0.436869740486145\n",
      "Step 4: BSNE_Loss=0.4280,local_loss: 0.06720866649079418,global_loss: 0.36074337363243103\n",
      "Step 5: BSNE_Loss=0.4337,local_loss: 0.07128842732375107,global_loss: 0.3624206483364105\n",
      "Step 6: BSNE_Loss=0.4265,local_loss: 0.04906855823871013,global_loss: 0.37739863991737366\n",
      "Step 7: BSNE_Loss=0.3341,local_loss: 0.04042977207879703,global_loss: 0.2936243712902069\n",
      "Step 8: BSNE_Loss=0.7882,local_loss: 0.0424693690859079,global_loss: 0.7456820607185364\n",
      "Step 9: BSNE_Loss=0.5873,local_loss: 0.0642450509314701,global_loss: 0.5230764150619507\n",
      "Step 10: BSNE_Loss=0.2764,local_loss: 0.08133709253265341,global_loss: 0.1950182020664215\n",
      "Step 11: BSNE_Loss=0.2635,local_loss: 0.07461897620236757,global_loss: 0.18887852132320404\n",
      "Step 12: BSNE_Loss=0.3470,local_loss: 0.08033941799162292,global_loss: 0.2666873037815094\n",
      "Step 13: BSNE_Loss=0.4765,local_loss: 0.059994770541158204,global_loss: 0.4165019690990448\n",
      "Step 14: BSNE_Loss=0.3084,local_loss: 0.07332174686059927,global_loss: 0.23511122167110443\n",
      "Step 15: BSNE_Loss=0.3741,local_loss: 0.07778636503580381,global_loss: 0.296311616897583\n",
      "Step 16: BSNE_Loss=0.4353,local_loss: 0.0722720281564193,global_loss: 0.36307284235954285\n",
      "Step 17: BSNE_Loss=0.3524,local_loss: 0.07213898031705708,global_loss: 0.28026652336120605\n",
      "Step 18: BSNE_Loss=0.4267,local_loss: 0.06345835853969417,global_loss: 0.3632439374923706\n",
      "Step 19: BSNE_Loss=0.2311,local_loss: 0.06656659098476772,global_loss: 0.16453084349632263\n",
      "Step 20: BSNE_Loss=0.7235,local_loss: 0.040949527421094506,global_loss: 0.6825992465019226\n",
      "Step 21: BSNE_Loss=0.6221,local_loss: 0.06637249294422332,global_loss: 0.555689811706543\n",
      "Step 22: BSNE_Loss=0.3674,local_loss: 0.07065784429285786,global_loss: 0.29672306776046753\n",
      "Step 23: BSNE_Loss=0.4853,local_loss: 0.06506784805223295,global_loss: 0.420236200094223\n",
      "Step 24: BSNE_Loss=0.3833,local_loss: 0.05195277140738353,global_loss: 0.3313354253768921\n",
      "Step 25: BSNE_Loss=0.6842,local_loss: 0.05261828016566996,global_loss: 0.631568431854248\n",
      "Step 26: BSNE_Loss=0.1941,local_loss: 0.0729616330164336,global_loss: 0.12115969508886337\n",
      "Step 27: BSNE_Loss=0.0995,local_loss: 0.05540513814104697,global_loss: 0.04409395530819893\n",
      "Step 28: BSNE_Loss=0.3163,local_loss: 0.0697282037363694,global_loss: 0.24661238491535187\n",
      "Step 29: BSNE_Loss=0.2938,local_loss: 0.08746927471310678,global_loss: 0.20629003643989563\n",
      "Step 30: BSNE_Loss=0.0419,local_loss: 0.07511408697617658,global_loss: -0.03323747590184212\n",
      "Center node 8111 finished at step 31 with loss 0.0419\n",
      "\n",
      "=== Pretraining Epoch 66 (Center Node: 3610) ===\n",
      "Step 0: BSNE_Loss=0.4555,local_loss: 0.06543211567544108,global_loss: 0.3901011347770691\n",
      "Step 1: BSNE_Loss=0.0804,local_loss: 0.05470060294187209,global_loss: 0.025715870782732964\n",
      "Step 2: BSNE_Loss=0.5841,local_loss: 0.04683044724375282,global_loss: 0.5372986197471619\n",
      "Step 3: BSNE_Loss=0.4216,local_loss: 0.05667510440987737,global_loss: 0.36493104696273804\n",
      "Step 4: BSNE_Loss=0.6604,local_loss: 0.04808652536278417,global_loss: 0.6123182773590088\n",
      "Step 5: BSNE_Loss=0.3534,local_loss: 0.04717990276600546,global_loss: 0.3062577545642853\n",
      "Step 6: BSNE_Loss=0.3910,local_loss: 0.04242002922669626,global_loss: 0.34858036041259766\n",
      "Step 7: BSNE_Loss=0.1883,local_loss: 0.05288741109580357,global_loss: 0.13543283939361572\n",
      "Step 8: BSNE_Loss=0.6069,local_loss: 0.05375126678664347,global_loss: 0.5531474351882935\n",
      "Step 9: BSNE_Loss=0.8180,local_loss: 0.04797594679722131,global_loss: 0.770010769367218\n",
      "Step 10: BSNE_Loss=0.5736,local_loss: 0.044925541934592195,global_loss: 0.5286460518836975\n",
      "Step 11: BSNE_Loss=0.6536,local_loss: 0.037397257403991344,global_loss: 0.6161750555038452\n",
      "Step 12: BSNE_Loss=0.7162,local_loss: 0.036250949220187484,global_loss: 0.6799445152282715\n",
      "Step 13: BSNE_Loss=0.6382,local_loss: 0.05733911164814216,global_loss: 0.5808724164962769\n",
      "Step 14: BSNE_Loss=0.7332,local_loss: 0.03624078419242839,global_loss: 0.6969682574272156\n",
      "Step 15: BSNE_Loss=0.4944,local_loss: 0.04975030898736431,global_loss: 0.4446922838687897\n",
      "Step 16: BSNE_Loss=0.0168,local_loss: 0.04694604364580314,global_loss: -0.03015548549592495\n",
      "Center node 3610 finished at step 17 with loss 0.0168\n",
      "\n",
      "=== Pretraining Epoch 67 (Center Node: 1561) ===\n",
      "Step 0: BSNE_Loss=-0.0723,local_loss: 0.07056899488583301,global_loss: -0.14288181066513062\n",
      "Center node 1561 finished at step 1 with loss -0.0723\n",
      "\n",
      "=== Pretraining Epoch 68 (Center Node: 7552) ===\n",
      "Step 0: BSNE_Loss=0.7490,local_loss: 0.02605950505597997,global_loss: 0.7229706048965454\n",
      "Step 1: BSNE_Loss=0.2204,local_loss: 0.05182281508357968,global_loss: 0.16854408383369446\n",
      "Step 2: BSNE_Loss=0.2416,local_loss: 0.0541662700830345,global_loss: 0.18748193979263306\n",
      "Step 3: BSNE_Loss=0.0199,local_loss: 0.04501540405129923,global_loss: -0.02508990652859211\n",
      "Center node 7552 finished at step 4 with loss 0.0199\n",
      "\n",
      "=== Pretraining Epoch 69 (Center Node: 9635) ===\n",
      "Step 0: BSNE_Loss=-0.1038,local_loss: 0.05215814144913079,global_loss: -0.15597988665103912\n",
      "Center node 9635 finished at step 1 with loss -0.1038\n",
      "\n",
      "=== Pretraining Epoch 70 (Center Node: 6310) ===\n",
      "Step 0: BSNE_Loss=0.2376,local_loss: 0.05381821457149305,global_loss: 0.18374593555927277\n",
      "Step 1: BSNE_Loss=0.1920,local_loss: 0.07519105270742668,global_loss: 0.11679103970527649\n",
      "Step 2: BSNE_Loss=0.0673,local_loss: 0.06477976565292895,global_loss: 0.0025643580593168736\n",
      "Step 3: BSNE_Loss=0.3659,local_loss: 0.08525708244909097,global_loss: 0.28068897128105164\n",
      "Step 4: BSNE_Loss=0.3808,local_loss: 0.06577041556509143,global_loss: 0.3149923086166382\n",
      "Step 5: BSNE_Loss=0.4121,local_loss: 0.0723442408873197,global_loss: 0.3397320508956909\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 6: BSNE_Loss=0.1398,local_loss: 0.07386603716553507,global_loss: 0.06596861779689789\n",
      "Step 7: BSNE_Loss=0.0418,local_loss: 0.08659226583125243,global_loss: -0.04477483779191971\n",
      "Center node 6310 finished at step 8 with loss 0.0418\n",
      "\n",
      "=== Pretraining Epoch 71 (Center Node: 10608) ===\n",
      "Step 0: BSNE_Loss=0.1835,local_loss: 0.10806845960017775,global_loss: 0.07542110234498978\n",
      "Step 1: BSNE_Loss=0.1781,local_loss: 0.08311044198001208,global_loss: 0.09496454149484634\n",
      "Step 2: BSNE_Loss=0.1853,local_loss: 0.08724144868077037,global_loss: 0.09810835123062134\n",
      "Step 3: BSNE_Loss=0.2025,local_loss: 0.05679555499263944,global_loss: 0.14570562541484833\n",
      "Step 4: BSNE_Loss=0.0241,local_loss: 0.06698754751140931,global_loss: -0.04283794015645981\n",
      "Center node 10608 finished at step 5 with loss 0.0241\n",
      "\n",
      "=== Pretraining Epoch 72 (Center Node: 1263) ===\n",
      "Step 0: BSNE_Loss=0.5519,local_loss: 0.06905303139715217,global_loss: 0.48282137513160706\n",
      "Step 1: BSNE_Loss=0.1391,local_loss: 0.07007016739019484,global_loss: 0.06905993074178696\n",
      "Step 2: BSNE_Loss=0.4310,local_loss: 0.06038835315713648,global_loss: 0.37057793140411377\n",
      "Step 3: BSNE_Loss=0.0812,local_loss: 0.05883046336868502,global_loss: 0.022341113537549973\n",
      "Step 4: BSNE_Loss=0.0809,local_loss: 0.08586789358057507,global_loss: -0.004971803165972233\n",
      "Step 5: BSNE_Loss=0.2801,local_loss: 0.08620888491397663,global_loss: 0.19388720393180847\n",
      "Step 6: BSNE_Loss=0.0413,local_loss: 0.09544260149530019,global_loss: -0.054126277565956116\n",
      "Center node 1263 finished at step 7 with loss 0.0413\n",
      "\n",
      "=== Pretraining Epoch 73 (Center Node: 409) ===\n",
      "Step 0: BSNE_Loss=0.4522,local_loss: 0.060987708161927015,global_loss: 0.3912258446216583\n",
      "Step 1: BSNE_Loss=-0.0562,local_loss: 0.056376611840059425,global_loss: -0.11256515234708786\n",
      "Center node 409 finished at step 2 with loss -0.0562\n",
      "\n",
      "=== Pretraining Epoch 74 (Center Node: 10025) ===\n",
      "Step 0: BSNE_Loss=0.1635,local_loss: 0.06542567927947399,global_loss: 0.09804891049861908\n",
      "Step 1: BSNE_Loss=-0.1267,local_loss: 0.08334766237116079,global_loss: -0.21007566154003143\n",
      "Center node 10025 finished at step 2 with loss -0.1267\n",
      "\n",
      "=== Pretraining Epoch 75 (Center Node: 10606) ===\n",
      "Step 0: BSNE_Loss=0.0849,local_loss: 0.05617347059703129,global_loss: 0.02872801572084427\n",
      "Step 1: BSNE_Loss=-0.1859,local_loss: 0.044127192828310444,global_loss: -0.23005160689353943\n",
      "Center node 10606 finished at step 2 with loss -0.1859\n",
      "\n",
      "=== Pretraining Epoch 76 (Center Node: 1583) ===\n",
      "Step 0: BSNE_Loss=-0.1454,local_loss: 0.04655221841080805,global_loss: -0.19190847873687744\n",
      "Center node 1583 finished at step 1 with loss -0.1454\n",
      "\n",
      "=== Pretraining Epoch 77 (Center Node: 1449) ===\n",
      "Step 0: BSNE_Loss=0.0122,local_loss: 0.03467421738432195,global_loss: -0.022486386820673943\n",
      "Center node 1449 finished at step 1 with loss 0.0122\n",
      "\n",
      "=== Pretraining Epoch 78 (Center Node: 472) ===\n",
      "Step 0: BSNE_Loss=-0.0647,local_loss: 0.0831445998059922,global_loss: -0.14783968031406403\n",
      "Center node 472 finished at step 1 with loss -0.0647\n",
      "\n",
      "=== Pretraining Epoch 79 (Center Node: 5501) ===\n",
      "Step 0: BSNE_Loss=0.5430,local_loss: 0.0330880714536658,global_loss: 0.5099196434020996\n",
      "Step 1: BSNE_Loss=0.5904,local_loss: 0.04010663813500162,global_loss: 0.5502803325653076\n",
      "Step 2: BSNE_Loss=0.5127,local_loss: 0.03590804820722433,global_loss: 0.4767770767211914\n",
      "Step 3: BSNE_Loss=0.5466,local_loss: 0.03154833105961638,global_loss: 0.5150080323219299\n",
      "Step 4: BSNE_Loss=0.5920,local_loss: 0.03451925936847738,global_loss: 0.5574464201927185\n",
      "Step 5: BSNE_Loss=0.4730,local_loss: 0.029009317383389632,global_loss: 0.4440184533596039\n",
      "Step 6: BSNE_Loss=0.5739,local_loss: 0.031768770846960684,global_loss: 0.5421773791313171\n",
      "Step 7: BSNE_Loss=0.5467,local_loss: 0.01932173626482312,global_loss: 0.5273832082748413\n",
      "Step 8: BSNE_Loss=0.5404,local_loss: 0.011400666375330645,global_loss: 0.5289918780326843\n",
      "Step 9: BSNE_Loss=0.4787,local_loss: 0.012711142745493658,global_loss: 0.4659935235977173\n",
      "Step 10: BSNE_Loss=0.4213,local_loss: 0.02002489206625773,global_loss: 0.40132227540016174\n",
      "Step 11: BSNE_Loss=0.4272,local_loss: 0.017337697437785748,global_loss: 0.40982934832572937\n",
      "Step 12: BSNE_Loss=0.3643,local_loss: 0.025776317196078705,global_loss: 0.3384906053543091\n",
      "Step 13: BSNE_Loss=0.2401,local_loss: 0.027658528398224115,global_loss: 0.2124631702899933\n",
      "Step 14: BSNE_Loss=0.5017,local_loss: 0.020414136470005492,global_loss: 0.48132771253585815\n",
      "Step 15: BSNE_Loss=0.4240,local_loss: 0.021941857888064367,global_loss: 0.402074933052063\n",
      "Step 16: BSNE_Loss=0.0650,local_loss: 0.03433013262680448,global_loss: 0.03063478320837021\n",
      "Step 17: BSNE_Loss=0.1782,local_loss: 0.0524423093704214,global_loss: 0.1257523149251938\n",
      "Step 18: BSNE_Loss=0.5127,local_loss: 0.03673464507480248,global_loss: 0.47592389583587646\n",
      "Step 19: BSNE_Loss=0.2081,local_loss: 0.03724948311516983,global_loss: 0.17081986367702484\n",
      "Step 20: BSNE_Loss=0.5074,local_loss: 0.027417250328113596,global_loss: 0.4799351096153259\n",
      "Step 21: BSNE_Loss=0.2546,local_loss: 0.03321087492527122,global_loss: 0.2213793247938156\n",
      "Step 22: BSNE_Loss=0.2334,local_loss: 0.04701911796707089,global_loss: 0.18638531863689423\n",
      "Step 23: BSNE_Loss=0.2185,local_loss: 0.021726383368240988,global_loss: 0.19677311182022095\n",
      "Step 24: BSNE_Loss=0.3875,local_loss: 0.037369448489542094,global_loss: 0.3501685559749603\n",
      "Step 25: BSNE_Loss=0.2811,local_loss: 0.03280503241087371,global_loss: 0.24831828474998474\n",
      "Step 26: BSNE_Loss=0.4756,local_loss: 0.024928378346054957,global_loss: 0.4506263732910156\n",
      "Step 27: BSNE_Loss=0.3907,local_loss: 0.03944333354876254,global_loss: 0.3512261211872101\n",
      "Step 28: BSNE_Loss=-0.0449,local_loss: 0.04184036983780406,global_loss: -0.08675714582204819\n",
      "Center node 5501 finished at step 29 with loss -0.0449\n",
      "\n",
      "=== Pretraining Epoch 80 (Center Node: 7205) ===\n",
      "Step 0: BSNE_Loss=-0.0938,local_loss: 0.041631940428690464,global_loss: -0.13541476428508759\n",
      "Center node 7205 finished at step 1 with loss -0.0938\n",
      "\n",
      "=== Pretraining Epoch 81 (Center Node: 10490) ===\n",
      "Step 0: BSNE_Loss=0.4962,local_loss: 0.02802089822682019,global_loss: 0.4681953191757202\n",
      "Step 1: BSNE_Loss=0.3331,local_loss: 0.047588525877262006,global_loss: 0.28556063771247864\n",
      "Step 2: BSNE_Loss=0.3747,local_loss: 0.03866065073939101,global_loss: 0.3360097408294678\n",
      "Step 3: BSNE_Loss=0.1692,local_loss: 0.0278304878776821,global_loss: 0.14135773479938507\n",
      "Step 4: BSNE_Loss=0.2800,local_loss: 0.024888341069689957,global_loss: 0.25508299469947815\n",
      "Step 5: BSNE_Loss=0.2182,local_loss: 0.029806036281306656,global_loss: 0.18843618035316467\n",
      "Step 6: BSNE_Loss=0.2794,local_loss: 0.03445156473757932,global_loss: 0.24491505324840546\n",
      "Step 7: BSNE_Loss=0.2095,local_loss: 0.04511885830362233,global_loss: 0.16436932981014252\n",
      "Step 8: BSNE_Loss=0.3778,local_loss: 0.05077008514720718,global_loss: 0.327038437128067\n",
      "Step 9: BSNE_Loss=0.2875,local_loss: 0.04962596905566724,global_loss: 0.23791852593421936\n",
      "Step 10: BSNE_Loss=0.2333,local_loss: 0.0449590258170949,global_loss: 0.18829508125782013\n",
      "Step 11: BSNE_Loss=0.3442,local_loss: 0.03726434680063908,global_loss: 0.3069762587547302\n",
      "Step 12: BSNE_Loss=0.5822,local_loss: 0.03929214392482173,global_loss: 0.5428986549377441\n",
      "Step 13: BSNE_Loss=0.6129,local_loss: 0.03731509712311228,global_loss: 0.5755829811096191\n",
      "Step 14: BSNE_Loss=0.4775,local_loss: 0.034767542149971936,global_loss: 0.44277599453926086\n",
      "Step 15: BSNE_Loss=0.2444,local_loss: 0.05541649920955375,global_loss: 0.18899111449718475\n",
      "Step 16: BSNE_Loss=0.1518,local_loss: 0.04263676886638399,global_loss: 0.10920364409685135\n",
      "Step 17: BSNE_Loss=0.1838,local_loss: 0.045211532436423174,global_loss: 0.13854742050170898\n",
      "Step 18: BSNE_Loss=0.2141,local_loss: 0.0279655133583595,global_loss: 0.18611359596252441\n",
      "Step 19: BSNE_Loss=0.2201,local_loss: 0.029893411669819042,global_loss: 0.19021569192409515\n",
      "Step 20: BSNE_Loss=0.1813,local_loss: 0.05591083664163822,global_loss: 0.12543688714504242\n",
      "Step 21: BSNE_Loss=0.5116,local_loss: 0.06315996798539524,global_loss: 0.4484444260597229\n",
      "Step 22: BSNE_Loss=0.3776,local_loss: 0.040650435638520714,global_loss: 0.3369114100933075\n",
      "Step 23: BSNE_Loss=0.2265,local_loss: 0.03496227219300027,global_loss: 0.1915704607963562\n",
      "Step 24: BSNE_Loss=0.4073,local_loss: 0.032181950396191324,global_loss: 0.3751007318496704\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 25: BSNE_Loss=0.6220,local_loss: 0.02902686131955872,global_loss: 0.5929513573646545\n",
      "Step 26: BSNE_Loss=0.5115,local_loss: 0.06391017860027022,global_loss: 0.4476030766963959\n",
      "Step 27: BSNE_Loss=0.4963,local_loss: 0.03930561370607456,global_loss: 0.45702165365219116\n",
      "Step 28: BSNE_Loss=0.1557,local_loss: 0.06579882408751439,global_loss: 0.08992916345596313\n",
      "Step 29: BSNE_Loss=0.2455,local_loss: 0.03852821940566985,global_loss: 0.20701059699058533\n",
      "Step 30: BSNE_Loss=0.4175,local_loss: 0.04006512207447227,global_loss: 0.3774570822715759\n",
      "Step 31: BSNE_Loss=-0.1568,local_loss: 0.05793793903910599,global_loss: -0.21473494172096252\n",
      "Center node 10490 finished at step 32 with loss -0.1568\n",
      "\n",
      "=== Pretraining Epoch 82 (Center Node: 9507) ===\n",
      "Step 0: BSNE_Loss=0.4228,local_loss: 0.05743590164282657,global_loss: 0.3653459846973419\n",
      "Step 1: BSNE_Loss=0.7062,local_loss: 0.07835274127439673,global_loss: 0.6278447508811951\n",
      "Step 2: BSNE_Loss=0.2777,local_loss: 0.08060519547938003,global_loss: 0.197092667222023\n",
      "Step 3: BSNE_Loss=0.4578,local_loss: 0.09084507524282046,global_loss: 0.3669132590293884\n",
      "Step 4: BSNE_Loss=0.1975,local_loss: 0.06959921969491106,global_loss: 0.12791308760643005\n",
      "Step 5: BSNE_Loss=0.6777,local_loss: 0.06002741698955601,global_loss: 0.6176453232765198\n",
      "Step 6: BSNE_Loss=0.2148,local_loss: 0.08664946737316712,global_loss: 0.12815934419631958\n",
      "Step 7: BSNE_Loss=0.3816,local_loss: 0.0538405925500384,global_loss: 0.32780322432518005\n",
      "Step 8: BSNE_Loss=0.3851,local_loss: 0.06743937388016223,global_loss: 0.31765198707580566\n",
      "Step 9: BSNE_Loss=0.2658,local_loss: 0.06817756201303882,global_loss: 0.19761024415493011\n",
      "Step 10: BSNE_Loss=0.3350,local_loss: 0.05931633212699271,global_loss: 0.2756390869617462\n",
      "Step 11: BSNE_Loss=0.1755,local_loss: 0.07875789101618791,global_loss: 0.09674261510372162\n",
      "Step 12: BSNE_Loss=0.3900,local_loss: 0.07390648028440816,global_loss: 0.3160618245601654\n",
      "Step 13: BSNE_Loss=0.6538,local_loss: 0.06394158864680387,global_loss: 0.5898236036300659\n",
      "Step 14: BSNE_Loss=-0.0330,local_loss: 0.07350234502406323,global_loss: -0.10652954131364822\n",
      "Center node 9507 finished at step 15 with loss -0.0330\n",
      "\n",
      "=== Pretraining Epoch 83 (Center Node: 8985) ===\n",
      "Step 0: BSNE_Loss=0.4213,local_loss: 0.0319574136417503,global_loss: 0.38937947154045105\n",
      "Step 1: BSNE_Loss=0.0926,local_loss: 0.0406324707609833,global_loss: 0.05191894620656967\n",
      "Step 2: BSNE_Loss=0.1161,local_loss: 0.029500115291757528,global_loss: 0.08663827180862427\n",
      "Step 3: BSNE_Loss=0.1200,local_loss: 0.038054816503345176,global_loss: 0.0819605365395546\n",
      "Step 4: BSNE_Loss=0.1407,local_loss: 0.04226595303323627,global_loss: 0.09840214997529984\n",
      "Step 5: BSNE_Loss=-0.2515,local_loss: 0.055128485984638625,global_loss: -0.30666664242744446\n",
      "Center node 8985 finished at step 6 with loss -0.2515\n",
      "\n",
      "=== Pretraining Epoch 84 (Center Node: 2752) ===\n",
      "Step 0: BSNE_Loss=0.1414,local_loss: 0.06279447825568732,global_loss: 0.0786098837852478\n",
      "Step 1: BSNE_Loss=0.0294,local_loss: 0.06529196839220258,global_loss: -0.03590543940663338\n",
      "Center node 2752 finished at step 2 with loss 0.0294\n",
      "\n",
      "=== Pretraining Epoch 85 (Center Node: 1990) ===\n",
      "Step 0: BSNE_Loss=0.0559,local_loss: 0.056247158665778826,global_loss: -0.0003345572331454605\n",
      "Step 1: BSNE_Loss=0.4085,local_loss: 0.06478588913422964,global_loss: 0.34370237588882446\n",
      "Step 2: BSNE_Loss=0.3709,local_loss: 0.03982133142057608,global_loss: 0.331093430519104\n",
      "Step 3: BSNE_Loss=-0.1575,local_loss: 0.0512895995040412,global_loss: -0.2088238000869751\n",
      "Center node 1990 finished at step 4 with loss -0.1575\n",
      "\n",
      "=== Pretraining Epoch 86 (Center Node: 10362) ===\n",
      "Step 0: BSNE_Loss=0.0343,local_loss: 0.04764338831709426,global_loss: -0.013388996943831444\n",
      "Center node 10362 finished at step 1 with loss 0.0343\n",
      "\n",
      "=== Pretraining Epoch 87 (Center Node: 1722) ===\n",
      "Step 0: BSNE_Loss=0.2861,local_loss: 0.049071551127183005,global_loss: 0.23703548312187195\n",
      "Step 1: BSNE_Loss=0.2698,local_loss: 0.05471009057713798,global_loss: 0.21509532630443573\n",
      "Step 2: BSNE_Loss=-0.0156,local_loss: 0.08942678287776903,global_loss: -0.10504806041717529\n",
      "Center node 1722 finished at step 3 with loss -0.0156\n",
      "\n",
      "=== Pretraining Epoch 88 (Center Node: 2359) ===\n",
      "Step 0: BSNE_Loss=0.2875,local_loss: 0.060611567862516,global_loss: 0.22691670060157776\n",
      "Step 1: BSNE_Loss=-0.0092,local_loss: 0.03788323267380754,global_loss: -0.04710135981440544\n",
      "Center node 2359 finished at step 2 with loss -0.0092\n",
      "\n",
      "=== Pretraining Epoch 89 (Center Node: 5972) ===\n",
      "Step 0: BSNE_Loss=0.1177,local_loss: 0.0849866935768129,global_loss: 0.032676152884960175\n",
      "Step 1: BSNE_Loss=-0.0038,local_loss: 0.10152099624123272,global_loss: -0.10527749359607697\n",
      "Center node 5972 finished at step 2 with loss -0.0038\n",
      "\n",
      "=== Pretraining Epoch 90 (Center Node: 9636) ===\n",
      "Step 0: BSNE_Loss=0.2424,local_loss: 0.04693357960316619,global_loss: 0.19548176229000092\n",
      "Step 1: BSNE_Loss=0.4941,local_loss: 0.05145434494686446,global_loss: 0.4426426887512207\n",
      "Step 2: BSNE_Loss=0.4052,local_loss: 0.050014390294909256,global_loss: 0.3552107512950897\n",
      "Step 3: BSNE_Loss=0.1949,local_loss: 0.05955003935477947,global_loss: 0.13536526262760162\n",
      "Step 4: BSNE_Loss=0.2015,local_loss: 0.061431403522960476,global_loss: 0.14005793631076813\n",
      "Step 5: BSNE_Loss=0.4850,local_loss: 0.0456430931795479,global_loss: 0.43939197063446045\n",
      "Step 6: BSNE_Loss=0.3798,local_loss: 0.06213980390005538,global_loss: 0.31766828894615173\n",
      "Step 7: BSNE_Loss=0.3938,local_loss: 0.039304152705521386,global_loss: 0.3544764220714569\n",
      "Step 8: BSNE_Loss=0.4064,local_loss: 0.05359329997877672,global_loss: 0.35281288623809814\n",
      "Step 9: BSNE_Loss=0.3874,local_loss: 0.05488616012762955,global_loss: 0.3324736952781677\n",
      "Step 10: BSNE_Loss=0.4241,local_loss: 0.05279750240220525,global_loss: 0.3712804913520813\n",
      "Step 11: BSNE_Loss=0.2218,local_loss: 0.04346083659074772,global_loss: 0.17833375930786133\n",
      "Step 12: BSNE_Loss=0.9516,local_loss: 0.03779962831583285,global_loss: 0.9138253927230835\n",
      "Step 13: BSNE_Loss=-0.2055,local_loss: 0.06460222835387672,global_loss: -0.2700912654399872\n",
      "Center node 9636 finished at step 14 with loss -0.2055\n",
      "\n",
      "=== Pretraining Epoch 91 (Center Node: 6812) ===\n",
      "Step 0: BSNE_Loss=0.7453,local_loss: 0.035078088450573745,global_loss: 0.7101743221282959\n",
      "Step 1: BSNE_Loss=0.7806,local_loss: 0.03708378520720554,global_loss: 0.743549644947052\n",
      "Step 2: BSNE_Loss=0.6646,local_loss: 0.034643948545880966,global_loss: 0.6299930214881897\n",
      "Step 3: BSNE_Loss=0.8042,local_loss: 0.0345781237068247,global_loss: 0.7696545124053955\n",
      "Step 4: BSNE_Loss=0.7787,local_loss: 0.0367259713643863,global_loss: 0.7419449090957642\n",
      "Step 5: BSNE_Loss=0.8481,local_loss: 0.03562683064760458,global_loss: 0.8125203251838684\n",
      "Step 6: BSNE_Loss=0.7564,local_loss: 0.018670832320060282,global_loss: 0.7376964092254639\n",
      "Step 7: BSNE_Loss=0.8577,local_loss: 0.019533326733391067,global_loss: 0.83819979429245\n",
      "Step 8: BSNE_Loss=0.5570,local_loss: 0.022371680690697685,global_loss: 0.5345810651779175\n",
      "Step 9: BSNE_Loss=0.4919,local_loss: 0.0287190596685215,global_loss: 0.4631825089454651\n",
      "Step 10: BSNE_Loss=0.4014,local_loss: 0.05380354509703096,global_loss: 0.3475843369960785\n",
      "Step 11: BSNE_Loss=0.2874,local_loss: 0.06996811771972204,global_loss: 0.21739673614501953\n",
      "Step 12: BSNE_Loss=0.2827,local_loss: 0.06377531723749755,global_loss: 0.21889661252498627\n",
      "Step 13: BSNE_Loss=0.2210,local_loss: 0.053981987061321735,global_loss: 0.1670074313879013\n",
      "Step 14: BSNE_Loss=0.1689,local_loss: 0.08326361593351914,global_loss: 0.0856146588921547\n",
      "Step 15: BSNE_Loss=0.1598,local_loss: 0.0674372223756679,global_loss: 0.09239095449447632\n",
      "Step 16: BSNE_Loss=0.2059,local_loss: 0.05578271620579989,global_loss: 0.1501406580209732\n",
      "Step 17: BSNE_Loss=0.4560,local_loss: 0.053766570784091985,global_loss: 0.4022451639175415\n",
      "Step 18: BSNE_Loss=0.2910,local_loss: 0.07134487821234886,global_loss: 0.21970489621162415\n",
      "Step 19: BSNE_Loss=0.1275,local_loss: 0.06500365241350463,global_loss: 0.06252164393663406\n",
      "Step 20: BSNE_Loss=0.1923,local_loss: 0.07521560030390734,global_loss: 0.11712361872196198\n",
      "Step 21: BSNE_Loss=0.0771,local_loss: 0.07634651681901256,global_loss: 0.0007192885386757553\n",
      "Step 22: BSNE_Loss=0.4432,local_loss: 0.03688187986490158,global_loss: 0.4063584506511688\n",
      "Step 23: BSNE_Loss=-0.0267,local_loss: 0.07053869992039716,global_loss: -0.09722340106964111\n",
      "Center node 6812 finished at step 24 with loss -0.0267\n",
      "\n",
      "=== Pretraining Epoch 92 (Center Node: 10752) ===\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 0: BSNE_Loss=0.4781,local_loss: 0.04766905885532878,global_loss: 0.430456280708313\n",
      "Step 1: BSNE_Loss=0.0237,local_loss: 0.04661142243436833,global_loss: -0.022911492735147476\n",
      "Center node 10752 finished at step 2 with loss 0.0237\n",
      "\n",
      "=== Pretraining Epoch 93 (Center Node: 151) ===\n",
      "Step 0: BSNE_Loss=0.2640,local_loss: 0.02169378460068079,global_loss: 0.24229708313941956\n",
      "Step 1: BSNE_Loss=0.0545,local_loss: 0.03690787736719159,global_loss: 0.01760229840874672\n",
      "Step 2: BSNE_Loss=0.4397,local_loss: 0.02418928154021513,global_loss: 0.41552770137786865\n",
      "Step 3: BSNE_Loss=0.5809,local_loss: 0.03974979651626512,global_loss: 0.541174590587616\n",
      "Step 4: BSNE_Loss=0.3519,local_loss: 0.036547319238396575,global_loss: 0.31533747911453247\n",
      "Step 5: BSNE_Loss=0.2785,local_loss: 0.04013714019225137,global_loss: 0.23838657140731812\n",
      "Step 6: BSNE_Loss=0.4042,local_loss: 0.031789579823223266,global_loss: 0.3723606467247009\n",
      "Step 7: BSNE_Loss=-0.1603,local_loss: 0.0672861830220254,global_loss: -0.22755363583564758\n",
      "Center node 151 finished at step 8 with loss -0.1603\n",
      "\n",
      "=== Pretraining Epoch 94 (Center Node: 8009) ===\n",
      "Step 0: BSNE_Loss=0.0660,local_loss: 0.038780740212924436,global_loss: 0.027193088084459305\n",
      "Step 1: BSNE_Loss=0.0938,local_loss: 0.04455595865896076,global_loss: 0.049196481704711914\n",
      "Step 5: BSNE_Loss=0.2691,local_loss: 0.0667159358628309,global_loss: 0.20241688191890717\n",
      "Step 6: BSNE_Loss=0.4525,local_loss: 0.04274094646352107,global_loss: 0.40971627831459045\n",
      "Step 7: BSNE_Loss=0.1137,local_loss: 0.06815099165921887,global_loss: 0.04554064944386482\n",
      "Step 8: BSNE_Loss=0.2954,local_loss: 0.05776058361217948,global_loss: 0.2376774698495865\n",
      "Step 9: BSNE_Loss=-0.0207,local_loss: 0.07750193764686275,global_loss: -0.09817196428775787\n",
      "Center node 8009 finished at step 10 with loss -0.0207\n",
      "\n",
      "=== Pretraining Epoch 95 (Center Node: 5849) ===\n",
      "Step 0: BSNE_Loss=0.5901,local_loss: 0.0304463912127444,global_loss: 0.559641420841217\n",
      "Step 1: BSNE_Loss=0.5198,local_loss: 0.03056291199377526,global_loss: 0.4892440736293793\n",
      "Step 2: BSNE_Loss=0.4764,local_loss: 0.03636427054431539,global_loss: 0.44007453322410583\n",
      "Step 3: BSNE_Loss=0.2021,local_loss: 0.03995913747432196,global_loss: 0.16212300956249237\n",
      "Step 4: BSNE_Loss=0.3258,local_loss: 0.0426179168400678,global_loss: 0.2832072079181671\n",
      "Step 5: BSNE_Loss=0.5968,local_loss: 0.03368639676722653,global_loss: 0.5631524324417114\n",
      "Step 6: BSNE_Loss=0.1266,local_loss: 0.04814039903233923,global_loss: 0.07850398123264313\n",
      "Step 7: BSNE_Loss=0.5659,local_loss: 0.04577373819009158,global_loss: 0.520125150680542\n",
      "Step 8: BSNE_Loss=0.3131,local_loss: 0.0379642544563346,global_loss: 0.2751617133617401\n",
      "Step 9: BSNE_Loss=0.3200,local_loss: 0.035490206721795564,global_loss: 0.2845083177089691\n",
      "Step 10: BSNE_Loss=0.3466,local_loss: 0.04406250452079624,global_loss: 0.30254557728767395\n",
      "Step 11: BSNE_Loss=0.1990,local_loss: 0.04280062420458258,global_loss: 0.15615880489349365\n",
      "Step 12: BSNE_Loss=0.1738,local_loss: 0.03430554766749364,global_loss: 0.1395104080438614\n",
      "Step 13: BSNE_Loss=0.3981,local_loss: 0.04431652343584931,global_loss: 0.35381484031677246\n",
      "Step 14: BSNE_Loss=0.2652,local_loss: 0.0341057082135828,global_loss: 0.23112031817436218\n",
      "Step 15: BSNE_Loss=0.1493,local_loss: 0.0466372691003496,global_loss: 0.10269540548324585\n",
      "Step 16: BSNE_Loss=0.2556,local_loss: 0.03704878978715818,global_loss: 0.21853548288345337\n",
      "Step 17: BSNE_Loss=0.2670,local_loss: 0.036939325707685836,global_loss: 0.23006212711334229\n",
      "Step 18: BSNE_Loss=0.4118,local_loss: 0.04303182793980265,global_loss: 0.3688000440597534\n",
      "Step 19: BSNE_Loss=0.2620,local_loss: 0.0410771820291244,global_loss: 0.2209385186433792\n",
      "Step 20: BSNE_Loss=0.5984,local_loss: 0.04416952878109806,global_loss: 0.5542522072792053\n",
      "Step 21: BSNE_Loss=0.3491,local_loss: 0.029398104181408077,global_loss: 0.31966546177864075\n",
      "Step 22: BSNE_Loss=0.2095,local_loss: 0.03352487043591546,global_loss: 0.17599685490131378\n",
      "Step 23: BSNE_Loss=0.2448,local_loss: 0.040443692107173634,global_loss: 0.2043795883655548\n",
      "Step 24: BSNE_Loss=0.1475,local_loss: 0.051931746316242686,global_loss: 0.09555606544017792\n",
      "Step 25: BSNE_Loss=0.3198,local_loss: 0.04176573085463286,global_loss: 0.27804484963417053\n",
      "Step 26: BSNE_Loss=0.3012,local_loss: 0.05013384470393038,global_loss: 0.2510504126548767\n",
      "Step 27: BSNE_Loss=-0.0152,local_loss: 0.039854248765345476,global_loss: -0.05506182834506035\n",
      "Center node 5849 finished at step 28 with loss -0.0152\n",
      "\n",
      "=== Pretraining Epoch 96 (Center Node: 8295) ===\n",
      "Step 0: BSNE_Loss=0.3847,local_loss: 0.059671337670756844,global_loss: 0.32501623034477234\n",
      "Step 1: BSNE_Loss=0.1592,local_loss: 0.07538296464957236,global_loss: 0.08380647748708725\n",
      "Step 2: BSNE_Loss=0.2541,local_loss: 0.05224687192738863,global_loss: 0.20188844203948975\n",
      "Step 3: BSNE_Loss=0.2617,local_loss: 0.05674651921006698,global_loss: 0.2049511969089508\n",
      "Step 4: BSNE_Loss=0.1254,local_loss: 0.05902214355519172,global_loss: 0.0664142444729805\n",
      "Step 5: BSNE_Loss=0.0461,local_loss: 0.06806952279735261,global_loss: -0.022016433998942375\n",
      "Center node 8295 finished at step 6 with loss 0.0461\n",
      "\n",
      "=== Pretraining Epoch 97 (Center Node: 7175) ===\n",
      "Step 0: BSNE_Loss=0.3867,local_loss: 0.0443807856077186,global_loss: 0.3423366844654083\n",
      "Step 1: BSNE_Loss=0.2907,local_loss: 0.04688706777123068,global_loss: 0.2438151240348816\n",
      "Step 2: BSNE_Loss=0.4604,local_loss: 0.03480215612557301,global_loss: 0.42559483647346497\n",
      "Step 3: BSNE_Loss=0.3072,local_loss: 0.04406940876529919,global_loss: 0.26314759254455566\n",
      "Step 4: BSNE_Loss=0.6155,local_loss: 0.0428952402231742,global_loss: 0.5725619792938232\n",
      "Step 5: BSNE_Loss=0.3367,local_loss: 0.04338539826835182,global_loss: 0.29333972930908203\n",
      "Step 6: BSNE_Loss=0.4576,local_loss: 0.04766482943969065,global_loss: 0.40989747643470764\n",
      "Step 7: BSNE_Loss=0.4237,local_loss: 0.05711011506582016,global_loss: 0.36661115288734436\n",
      "Step 8: BSNE_Loss=-0.0226,local_loss: 0.06837647136874758,global_loss: -0.0909484475851059\n",
      "Center node 7175 finished at step 9 with loss -0.0226\n",
      "\n",
      "=== Pretraining Epoch 98 (Center Node: 6158) ===\n",
      "Step 0: BSNE_Loss=0.7005,local_loss: 0.06795333116937352,global_loss: 0.6325640678405762\n",
      "Step 1: BSNE_Loss=0.7071,local_loss: 0.06363824389508436,global_loss: 0.6434250473976135\n",
      "Step 2: BSNE_Loss=0.5405,local_loss: 0.06198769800051562,global_loss: 0.47855591773986816\n",
      "Step 3: BSNE_Loss=0.7500,local_loss: 0.06938818369198237,global_loss: 0.680614709854126\n",
      "Step 4: BSNE_Loss=0.5919,local_loss: 0.06825789862978682,global_loss: 0.5235978960990906\n",
      "Step 5: BSNE_Loss=0.7952,local_loss: 0.05373034333182848,global_loss: 0.741476833820343\n",
      "Step 6: BSNE_Loss=0.7509,local_loss: 0.06315443160399556,global_loss: 0.6877294778823853\n",
      "Step 7: BSNE_Loss=0.5354,local_loss: 0.04776126161893894,global_loss: 0.4876552224159241\n",
      "Step 8: BSNE_Loss=0.5807,local_loss: 0.048278217090759384,global_loss: 0.532426118850708\n",
      "Step 9: BSNE_Loss=0.7624,local_loss: 0.055697510720640345,global_loss: 0.7066869139671326\n",
      "Step 10: BSNE_Loss=0.6436,local_loss: 0.051369869884562824,global_loss: 0.5922290682792664\n",
      "Step 11: BSNE_Loss=0.6349,local_loss: 0.029369486959377723,global_loss: 0.6055530309677124\n",
      "Step 12: BSNE_Loss=0.6688,local_loss: 0.04888610291315441,global_loss: 0.6199577450752258\n",
      "Step 13: BSNE_Loss=0.6275,local_loss: 0.05578005830864531,global_loss: 0.5717586278915405\n",
      "Step 14: BSNE_Loss=0.4745,local_loss: 0.02902997609409833,global_loss: 0.44546645879745483\n",
      "Step 15: BSNE_Loss=0.6142,local_loss: 0.022858301460022306,global_loss: 0.5913673639297485\n",
      "Step 16: BSNE_Loss=0.6074,local_loss: 0.019529915887757456,global_loss: 0.5878651738166809\n",
      "Step 17: BSNE_Loss=0.5613,local_loss: 0.021546042454715898,global_loss: 0.5397648811340332\n",
      "Step 18: BSNE_Loss=0.4846,local_loss: 0.0328935822897887,global_loss: 0.4516902565956116\n",
      "Step 19: BSNE_Loss=0.5542,local_loss: 0.03456926573817138,global_loss: 0.5196709632873535\n",
      "Step 20: BSNE_Loss=0.5131,local_loss: 0.028364965011416828,global_loss: 0.48475509881973267\n",
      "Step 21: BSNE_Loss=0.4788,local_loss: 0.03110276148101835,global_loss: 0.44770270586013794\n",
      "Step 22: BSNE_Loss=0.2500,local_loss: 0.038853267132365116,global_loss: 0.21117118000984192\n",
      "Step 23: BSNE_Loss=0.3434,local_loss: 0.03342193780865789,global_loss: 0.31000998616218567\n",
      "Step 24: BSNE_Loss=0.6513,local_loss: 0.022427592961495723,global_loss: 0.6288838982582092\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 25: BSNE_Loss=0.5219,local_loss: 0.022145484969722545,global_loss: 0.4997490644454956\n",
      "Step 26: BSNE_Loss=0.3249,local_loss: 0.03749055681077415,global_loss: 0.28736189007759094\n",
      "Step 27: BSNE_Loss=0.5429,local_loss: 0.02705692807037388,global_loss: 0.5158218145370483\n",
      "Step 28: BSNE_Loss=0.5211,local_loss: 0.03254931193261682,global_loss: 0.4886002540588379\n",
      "Step 29: BSNE_Loss=0.6276,local_loss: 0.013311378404590781,global_loss: 0.6142909526824951\n",
      "Step 30: BSNE_Loss=0.5250,local_loss: 0.03412453216674091,global_loss: 0.490899920463562\n",
      "Step 31: BSNE_Loss=0.3785,local_loss: 0.03471495190316794,global_loss: 0.34373795986175537\n",
      "Step 32: BSNE_Loss=0.2430,local_loss: 0.04395154392450839,global_loss: 0.19904208183288574\n",
      "Step 33: BSNE_Loss=0.3611,local_loss: 0.025931428552229387,global_loss: 0.3351325988769531\n",
      "Step 34: BSNE_Loss=0.3177,local_loss: 0.040131797429605466,global_loss: 0.2775912284851074\n",
      "Step 35: BSNE_Loss=0.2283,local_loss: 0.04139413549017932,global_loss: 0.18686802685260773\n",
      "Step 36: BSNE_Loss=0.5756,local_loss: 0.017497848853758286,global_loss: 0.5580525994300842\n",
      "Step 37: BSNE_Loss=0.4371,local_loss: 0.026473699130397106,global_loss: 0.4106271266937256\n",
      "Step 38: BSNE_Loss=0.2082,local_loss: 0.045868902620257335,global_loss: 0.1623038351535797\n",
      "Step 39: BSNE_Loss=0.3174,local_loss: 0.03852727066234174,global_loss: 0.2788503170013428\n",
      "Step 40: BSNE_Loss=0.3411,local_loss: 0.033781877806996076,global_loss: 0.3073013126850128\n",
      "Step 41: BSNE_Loss=0.0813,local_loss: 0.05443913616923611,global_loss: 0.02685578726232052\n",
      "Step 42: BSNE_Loss=0.5021,local_loss: 0.042538781905003695,global_loss: 0.4596051573753357\n",
      "Step 43: BSNE_Loss=0.3857,local_loss: 0.03630231311612696,global_loss: 0.34941157698631287\n",
      "Step 44: BSNE_Loss=0.1750,local_loss: 0.04646396805607626,global_loss: 0.12849456071853638\n",
      "Step 45: BSNE_Loss=0.4910,local_loss: 0.0402163524296557,global_loss: 0.45079755783081055\n",
      "Step 46: BSNE_Loss=0.6173,local_loss: 0.02913756463835614,global_loss: 0.5881830453872681\n",
      "Step 47: BSNE_Loss=0.4601,local_loss: 0.02769474414606845,global_loss: 0.4324137568473816\n",
      "Step 48: BSNE_Loss=0.0878,local_loss: 0.05506440734969903,global_loss: 0.03277375549077988\n",
      "Step 49: BSNE_Loss=0.2050,local_loss: 0.04378166854025249,global_loss: 0.16120155155658722\n",
      "Step 50: BSNE_Loss=0.3181,local_loss: 0.04323132618074084,global_loss: 0.2748478055000305\n",
      "Step 51: BSNE_Loss=0.3599,local_loss: 0.035406821298905045,global_loss: 0.32452645897865295\n",
      "Step 52: BSNE_Loss=0.6050,local_loss: 0.026515031314586938,global_loss: 0.5784354209899902\n",
      "Step 53: BSNE_Loss=0.4700,local_loss: 0.03244099388331709,global_loss: 0.4375852644443512\n",
      "Step 54: BSNE_Loss=0.3140,local_loss: 0.042882657415016276,global_loss: 0.2711339592933655\n",
      "Step 55: BSNE_Loss=0.4944,local_loss: 0.034537555764936646,global_loss: 0.459850937128067\n",
      "Step 56: BSNE_Loss=0.3213,local_loss: 0.037784239927495286,global_loss: 0.28355345129966736\n",
      "Step 57: BSNE_Loss=0.5284,local_loss: 0.023334961965976227,global_loss: 0.5051137208938599\n",
      "Step 58: BSNE_Loss=0.5113,local_loss: 0.033578894810842363,global_loss: 0.47767239809036255\n",
      "Step 59: BSNE_Loss=0.5343,local_loss: 0.028811107327640986,global_loss: 0.5055386424064636\n",
      "Step 60: BSNE_Loss=0.5708,local_loss: 0.039548995318474736,global_loss: 0.5312734842300415\n",
      "Step 61: BSNE_Loss=0.2005,local_loss: 0.04517412694010268,global_loss: 0.15529154241085052\n",
      "Step 62: BSNE_Loss=0.5671,local_loss: 0.028942405694101524,global_loss: 0.5381858348846436\n",
      "Step 63: BSNE_Loss=0.4932,local_loss: 0.04135818458575112,global_loss: 0.4518653154373169\n",
      "Step 64: BSNE_Loss=0.4420,local_loss: 0.04536306212234309,global_loss: 0.39664000272750854\n",
      "Step 65: BSNE_Loss=0.5201,local_loss: 0.027551518231162832,global_loss: 0.4925433397293091\n",
      "Step 66: BSNE_Loss=0.2658,local_loss: 0.04508845676835335,global_loss: 0.22074590623378754\n",
      "Step 67: BSNE_Loss=0.4338,local_loss: 0.03488398407796448,global_loss: 0.39893707633018494\n",
      "Step 68: BSNE_Loss=0.2154,local_loss: 0.04843513249601174,global_loss: 0.16697242856025696\n",
      "Step 69: BSNE_Loss=0.3493,local_loss: 0.04161838407384641,global_loss: 0.3077256381511688\n",
      "Step 70: BSNE_Loss=0.5562,local_loss: 0.0415783830174411,global_loss: 0.5146581530570984\n",
      "Step 71: BSNE_Loss=0.4003,local_loss: 0.03427356226528125,global_loss: 0.36599841713905334\n",
      "Step 72: BSNE_Loss=0.6768,local_loss: 0.023611842795849887,global_loss: 0.6532105207443237\n",
      "Step 73: BSNE_Loss=0.5373,local_loss: 0.032662901319728134,global_loss: 0.5046634674072266\n",
      "Step 74: BSNE_Loss=0.4059,local_loss: 0.04009704394367139,global_loss: 0.3657732605934143\n",
      "Step 75: BSNE_Loss=0.3755,local_loss: 0.031916359699025086,global_loss: 0.3435439467430115\n",
      "Step 76: BSNE_Loss=0.4213,local_loss: 0.03447277630498209,global_loss: 0.3868023455142975\n",
      "Step 77: BSNE_Loss=0.2820,local_loss: 0.04575060663942314,global_loss: 0.2362377941608429\n",
      "Step 78: BSNE_Loss=0.1603,local_loss: 0.045460715039166384,global_loss: 0.11480462551116943\n",
      "Step 79: BSNE_Loss=0.2995,local_loss: 0.044193857179410795,global_loss: 0.25527223944664\n",
      "Step 80: BSNE_Loss=0.2493,local_loss: 0.037455212331633306,global_loss: 0.21186459064483643\n",
      "Step 81: BSNE_Loss=0.5122,local_loss: 0.041005761729019305,global_loss: 0.47114747762680054\n",
      "Step 82: BSNE_Loss=0.2528,local_loss: 0.04084710275192226,global_loss: 0.21196942031383514\n",
      "Step 83: BSNE_Loss=0.3136,local_loss: 0.034909757076939205,global_loss: 0.27866169810295105\n",
      "Step 84: BSNE_Loss=0.4322,local_loss: 0.04195137545754331,global_loss: 0.3902963399887085\n",
      "Step 85: BSNE_Loss=0.4151,local_loss: 0.04541560177874629,global_loss: 0.36971718072891235\n",
      "Step 86: BSNE_Loss=0.5239,local_loss: 0.03358898817230829,global_loss: 0.4903307855129242\n",
      "Step 87: BSNE_Loss=0.5172,local_loss: 0.03661336708289692,global_loss: 0.48063552379608154\n",
      "Step 88: BSNE_Loss=0.3832,local_loss: 0.033602739823807834,global_loss: 0.3496427834033966\n",
      "Step 89: BSNE_Loss=0.0686,local_loss: 0.05516348652544758,global_loss: 0.01339190173894167\n",
      "Step 90: BSNE_Loss=0.5028,local_loss: 0.043670460116837895,global_loss: 0.4591100215911865\n",
      "Step 91: BSNE_Loss=0.1840,local_loss: 0.049419725161078565,global_loss: 0.13455459475517273\n",
      "Step 92: BSNE_Loss=0.4832,local_loss: 0.029949637547890616,global_loss: 0.45323434472084045\n",
      "Step 93: BSNE_Loss=0.6390,local_loss: 0.03778483144921875,global_loss: 0.6011799573898315\n",
      "Step 94: BSNE_Loss=0.1629,local_loss: 0.048111580985805064,global_loss: 0.11483024060726166\n",
      "Step 95: BSNE_Loss=0.3697,local_loss: 0.02979809143674322,global_loss: 0.33987393975257874\n",
      "Step 96: BSNE_Loss=0.1546,local_loss: 0.054821917061859654,global_loss: 0.09974350780248642\n",
      "Step 97: BSNE_Loss=0.2491,local_loss: 0.05344376110842852,global_loss: 0.1956617385149002\n",
      "Step 98: BSNE_Loss=0.6652,local_loss: 0.0300450443909473,global_loss: 0.6351534724235535\n",
      "Step 99: BSNE_Loss=0.4000,local_loss: 0.04396811813949739,global_loss: 0.35598647594451904\n",
      "Step 100: BSNE_Loss=0.3653,local_loss: 0.04164652415883424,global_loss: 0.3236153721809387\n",
      "Step 101: BSNE_Loss=0.3080,local_loss: 0.034318003052154655,global_loss: 0.2736830413341522\n",
      "Step 102: BSNE_Loss=0.4470,local_loss: 0.02536196873143267,global_loss: 0.4216628074645996\n",
      "Step 103: BSNE_Loss=0.3928,local_loss: 0.02818477363852233,global_loss: 0.3646083176136017\n",
      "Step 104: BSNE_Loss=0.4447,local_loss: 0.03151336117312109,global_loss: 0.4132041037082672\n",
      "Step 105: BSNE_Loss=0.2064,local_loss: 0.05335401066814509,global_loss: 0.15302737057209015\n",
      "Step 106: BSNE_Loss=0.2248,local_loss: 0.0520574620375604,global_loss: 0.1727801114320755\n",
      "Step 107: BSNE_Loss=0.2877,local_loss: 0.03787361584983871,global_loss: 0.24979017674922943\n",
      "Step 108: BSNE_Loss=0.4449,local_loss: 0.03430453859188118,global_loss: 0.41060924530029297\n",
      "Step 109: BSNE_Loss=0.2847,local_loss: 0.04814886226994315,global_loss: 0.23654094338417053\n",
      "Step 110: BSNE_Loss=0.0030,local_loss: 0.0614625979233065,global_loss: -0.05847858265042305\n",
      "Center node 6158 finished at step 111 with loss 0.0030\n",
      "\n",
      "=== Pretraining Epoch 99 (Center Node: 2354) ===\n",
      "Step 0: BSNE_Loss=0.4124,local_loss: 0.044174820469058955,global_loss: 0.3682189881801605\n",
      "Step 1: BSNE_Loss=0.4617,local_loss: 0.0366831157867961,global_loss: 0.42505505681037903\n",
      "Step 2: BSNE_Loss=0.2365,local_loss: 0.049360665105496235,global_loss: 0.18716217577457428\n",
      "Step 3: BSNE_Loss=0.1490,local_loss: 0.0627573340206497,global_loss: 0.08628927171230316\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 4: BSNE_Loss=0.6360,local_loss: 0.042287335667922414,global_loss: 0.5937591791152954\n",
      "Step 5: BSNE_Loss=0.0765,local_loss: 0.04538946585139218,global_loss: 0.031063150614500046\n",
      "Step 6: BSNE_Loss=0.5386,local_loss: 0.0386637783421561,global_loss: 0.49997395277023315\n",
      "Step 7: BSNE_Loss=0.6019,local_loss: 0.0316170906065195,global_loss: 0.5702818632125854\n",
      "Step 8: BSNE_Loss=0.3313,local_loss: 0.04222034126150404,global_loss: 0.28908953070640564\n",
      "Step 9: BSNE_Loss=0.3798,local_loss: 0.04376098134993352,global_loss: 0.3360300362110138\n",
      "Step 10: BSNE_Loss=0.5665,local_loss: 0.03028028881654642,global_loss: 0.5362527966499329\n",
      "Step 11: BSNE_Loss=0.3805,local_loss: 0.03684067121864819,global_loss: 0.34366321563720703\n",
      "Step 12: BSNE_Loss=0.3627,local_loss: 0.04176091952504646,global_loss: 0.32096800208091736\n",
      "Step 13: BSNE_Loss=0.4176,local_loss: 0.04875720299255669,global_loss: 0.3688811659812927\n",
      "Step 14: BSNE_Loss=0.1275,local_loss: 0.05189336327791516,global_loss: 0.0755663514137268\n",
      "Step 15: BSNE_Loss=0.2741,local_loss: 0.04418544449961971,global_loss: 0.22991180419921875\n",
      "Step 16: BSNE_Loss=0.4940,local_loss: 0.03526403825666423,global_loss: 0.45869696140289307\n",
      "Step 17: BSNE_Loss=0.4062,local_loss: 0.031031188908915604,global_loss: 0.3752121329307556\n",
      "Step 18: BSNE_Loss=0.4325,local_loss: 0.0371494528591556,global_loss: 0.39539656043052673\n",
      "Step 19: BSNE_Loss=0.5805,local_loss: 0.0352523169818732,global_loss: 0.54520583152771\n",
      "Step 20: BSNE_Loss=0.4051,local_loss: 0.032984233816116516,global_loss: 0.3720707297325134\n",
      "Step 21: BSNE_Loss=0.4887,local_loss: 0.02977424760496044,global_loss: 0.458929181098938\n",
      "Step 22: BSNE_Loss=0.1058,local_loss: 0.061417033315872595,global_loss: 0.04438113793730736\n",
      "Step 23: BSNE_Loss=0.3401,local_loss: 0.03743798142699507,global_loss: 0.3026924133300781\n",
      "Step 24: BSNE_Loss=0.3881,local_loss: 0.0435014979372203,global_loss: 0.34459856152534485\n",
      "Step 25: BSNE_Loss=0.3280,local_loss: 0.03790710683878354,global_loss: 0.29013946652412415\n",
      "Step 26: BSNE_Loss=0.3276,local_loss: 0.041416432980958616,global_loss: 0.28622639179229736\n",
      "Step 27: BSNE_Loss=0.4263,local_loss: 0.044291289433046464,global_loss: 0.3820114731788635\n",
      "Step 28: BSNE_Loss=0.3834,local_loss: 0.044585104516052285,global_loss: 0.338836669921875\n",
      "Step 29: BSNE_Loss=0.5623,local_loss: 0.028365724001426375,global_loss: 0.5339776873588562\n",
      "Step 30: BSNE_Loss=0.4388,local_loss: 0.03457232566109436,global_loss: 0.40424278378486633\n",
      "Step 31: BSNE_Loss=0.4790,local_loss: 0.029853590555825947,global_loss: 0.44913172721862793\n",
      "Step 32: BSNE_Loss=0.2481,local_loss: 0.04679148239175255,global_loss: 0.20130972564220428\n",
      "Step 33: BSNE_Loss=0.1677,local_loss: 0.040468674899379595,global_loss: 0.1271972358226776\n",
      "Step 34: BSNE_Loss=0.0747,local_loss: 0.04618975331916344,global_loss: 0.028550658375024796\n",
      "Step 35: BSNE_Loss=0.3473,local_loss: 0.03693782839037402,global_loss: 0.3103598952293396\n",
      "Step 36: BSNE_Loss=0.3933,local_loss: 0.03408106173856013,global_loss: 0.3592669665813446\n",
      "Step 37: BSNE_Loss=0.2113,local_loss: 0.0548516314716027,global_loss: 0.15642380714416504\n",
      "Step 38: BSNE_Loss=0.5574,local_loss: 0.043217596096147386,global_loss: 0.5142292976379395\n",
      "Step 39: BSNE_Loss=0.2064,local_loss: 0.04964439316195099,global_loss: 0.15679919719696045\n",
      "Step 40: BSNE_Loss=0.3111,local_loss: 0.043143382459630064,global_loss: 0.2679903507232666\n",
      "Step 41: BSNE_Loss=0.8256,local_loss: 0.03469149447472031,global_loss: 0.7908880114555359\n",
      "Step 42: BSNE_Loss=0.3681,local_loss: 0.03327342598846673,global_loss: 0.3348020613193512\n",
      "Step 43: BSNE_Loss=0.3761,local_loss: 0.0437176014586398,global_loss: 0.3323364853858948\n",
      "Step 44: BSNE_Loss=0.0996,local_loss: 0.04332966083501812,global_loss: 0.056250832974910736\n",
      "Step 45: BSNE_Loss=0.3179,local_loss: 0.03247103462923172,global_loss: 0.2854160964488983\n",
      "Step 46: BSNE_Loss=0.1609,local_loss: 0.056510314991779706,global_loss: 0.1044127568602562\n",
      "Step 47: BSNE_Loss=0.5221,local_loss: 0.03682340395558934,global_loss: 0.4852554202079773\n",
      "Step 48: BSNE_Loss=0.4770,local_loss: 0.04799043163923093,global_loss: 0.42896726727485657\n",
      "Step 49: BSNE_Loss=0.4368,local_loss: 0.03181921854938047,global_loss: 0.40495777130126953\n",
      "Step 50: BSNE_Loss=0.3079,local_loss: 0.03879816986838179,global_loss: 0.2690523862838745\n",
      "Step 51: BSNE_Loss=0.4962,local_loss: 0.030200210207589385,global_loss: 0.46600455045700073\n",
      "Step 52: BSNE_Loss=0.5994,local_loss: 0.038331931794200476,global_loss: 0.561087965965271\n",
      "Step 53: BSNE_Loss=0.1886,local_loss: 0.042881397658718644,global_loss: 0.14569099247455597\n",
      "Step 54: BSNE_Loss=0.1052,local_loss: 0.0520476773361305,global_loss: 0.05313611403107643\n",
      "Step 55: BSNE_Loss=0.5445,local_loss: 0.03267292061631964,global_loss: 0.5118497014045715\n",
      "Step 56: BSNE_Loss=0.5362,local_loss: 0.039861947715873966,global_loss: 0.49637168645858765\n",
      "Step 57: BSNE_Loss=0.5524,local_loss: 0.038619856832150204,global_loss: 0.5137367248535156\n",
      "Step 58: BSNE_Loss=0.1746,local_loss: 0.04929752130212872,global_loss: 0.12530438601970673\n",
      "Step 59: BSNE_Loss=0.5765,local_loss: 0.0445494586879909,global_loss: 0.5319478511810303\n",
      "Step 60: BSNE_Loss=0.4856,local_loss: 0.03135294154884916,global_loss: 0.45427560806274414\n",
      "Step 61: BSNE_Loss=0.4358,local_loss: 0.04483921916164972,global_loss: 0.3909197747707367\n",
      "Step 62: BSNE_Loss=0.4475,local_loss: 0.04599207722159964,global_loss: 0.4014892876148224\n",
      "Step 63: BSNE_Loss=0.2454,local_loss: 0.045771985130913656,global_loss: 0.1996498554944992\n",
      "Step 64: BSNE_Loss=0.7032,local_loss: 0.03524287428682784,global_loss: 0.66793292760849\n",
      "Step 65: BSNE_Loss=0.6258,local_loss: 0.0419923015991278,global_loss: 0.5837814807891846\n",
      "Step 66: BSNE_Loss=0.1894,local_loss: 0.048168028407184954,global_loss: 0.14120911061763763\n",
      "Step 67: BSNE_Loss=0.6855,local_loss: 0.037728306053452426,global_loss: 0.647800862789154\n",
      "Step 68: BSNE_Loss=0.2215,local_loss: 0.04019539426278375,global_loss: 0.1812593787908554\n",
      "Step 69: BSNE_Loss=0.0281,local_loss: 0.06882153521839171,global_loss: -0.040679533034563065\n",
      "Center node 2354 finished at step 70 with loss 0.0281\n",
      "\n",
      "=== Starting Fine-tuning ===\n",
      "Epoch: 000 | Loss: 0.2381 | Val AUC: 0.4196 | Val F1: 0.4751\n",
      "Epoch: 005 | Loss: 0.1353 | Val AUC: 0.5137 | Val F1: 0.4751\n",
      "Epoch: 010 | Loss: 0.5916 | Val AUC: 0.6489 | Val F1: 0.4751\n",
      "Epoch: 015 | Loss: 0.0783 | Val AUC: 0.8050 | Val F1: 0.7272\n",
      "Epoch: 020 | Loss: 0.0336 | Val AUC: 0.8346 | Val F1: 0.4751\n",
      "Epoch: 025 | Loss: 0.2161 | Val AUC: 0.8556 | Val F1: 0.4751\n",
      "Epoch: 030 | Loss: 0.0642 | Val AUC: 0.8698 | Val F1: 0.4751\n",
      "Epoch: 035 | Loss: 0.0003 | Val AUC: 0.8755 | Val F1: 0.4751\n",
      "Epoch: 040 | Loss: 0.0176 | Val AUC: 0.8764 | Val F1: 0.4751\n",
      "Epoch: 045 | Loss: 0.0015 | Val AUC: 0.8775 | Val F1: 0.4813\n",
      "Epoch: 050 | Loss: 0.0288 | Val AUC: 0.8805 | Val F1: 0.4927\n",
      "Epoch: 055 | Loss: 0.0619 | Val AUC: 0.8751 | Val F1: 0.4809\n",
      "Epoch: 060 | Loss: 0.0000 | Val AUC: 0.8683 | Val F1: 0.4811\n",
      "Epoch: 065 | Loss: 0.0487 | Val AUC: 0.8785 | Val F1: 0.7614\n",
      "Epoch: 070 | Loss: 0.0755 | Val AUC: 0.8835 | Val F1: 0.7367\n",
      "Epoch: 075 | Loss: 0.1715 | Val AUC: 0.8714 | Val F1: 0.4948\n",
      "Epoch: 080 | Loss: 0.2653 | Val AUC: 0.8621 | Val F1: 0.4909\n",
      "Epoch: 085 | Loss: 0.1515 | Val AUC: 0.8761 | Val F1: 0.7527\n",
      "Epoch: 090 | Loss: 0.0398 | Val AUC: 0.8850 | Val F1: 0.7603\n",
      "Epoch: 095 | Loss: 0.0000 | Val AUC: 0.8133 | Val F1: 0.4865\n",
      "Epoch: 100 | Loss: 0.0002 | Val AUC: 0.7815 | Val F1: 0.4909\n",
      "Epoch: 105 | Loss: 0.0005 | Val AUC: 0.8449 | Val F1: 0.4924\n",
      "Epoch: 110 | Loss: 0.0065 | Val AUC: 0.8914 | Val F1: 0.7632\n",
      "Epoch: 115 | Loss: 0.0688 | Val AUC: 0.8933 | Val F1: 0.7581\n",
      "Epoch: 120 | Loss: 0.0099 | Val AUC: 0.8898 | Val F1: 0.7046\n",
      "Epoch: 125 | Loss: 0.1325 | Val AUC: 0.8914 | Val F1: 0.7319\n",
      "Epoch: 130 | Loss: 0.0000 | Val AUC: 0.8921 | Val F1: 0.7443\n"
     ]
    }
   ],
   "source": [
    "# 进行完整测试（新）（每个step不重新采样子图）（不加入平方项）\n",
    "\n",
    "# 参数设置\n",
    "args = {\n",
    "    \"dataset\": \"amazon\",\n",
    "#     \"dataset\": \"yelp\",\n",
    "    \"batch_size\": 8,\n",
    "    \"sample_size\": 50,\n",
    "    \"weight_decay\": 0.00005,\n",
    "    \"emb_size\": 32,\n",
    "    \"pretrain_epochs\": 50,\n",
    "    \"pretrain_lr\": 0.0005, #0.0005，\n",
    "    \"finetune_lr\": 0.0005,\n",
    "    \"num_epochs\": 200,\n",
    "    \"pretrain_patience\": 20,\n",
    "    \"patience\": 30,\n",
    "    \"tsne_weight\": 0.3,\n",
    "    \"weight\": 0.6,\n",
    "    \"layers\": 7,\n",
    "    \"test_size\": 0.6,\n",
    "    \"val_size\": 0.5,\n",
    "    \"layers_tree\": 7,\n",
    "    \"seed\": 76,\n",
    "    \"num_heads\": 4,\n",
    "    \"num_layers\": 3,\n",
    "    \"drop_rate\": 0.5\n",
    "}\n",
    "\n",
    "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "print(device)\n",
    "\n",
    "timestamp = time.strftime(\"%Y%m%d-%H%M%S\")\n",
    "writer = SummaryWriter(f'runs/{args[\"dataset\"]}_{timestamp}')\n",
    "\n",
    "print('loading data...')\n",
    "prefix = \"../../data/\"\n",
    "\n",
    "edge_indexs, feat_data, labels = load_data(args['dataset'], args['layers_tree'], prefix)\n",
    "\n",
    "np.random.seed(args['seed'])\n",
    "rd.seed(args['seed'])\n",
    "\n",
    "if args['dataset'] == 'yelp':\n",
    "    index = list(range(len(labels)))\n",
    "    idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels, stratify=labels,\n",
    "                                                                    test_size=args['test_size'], random_state=2,\n",
    "                                                                    shuffle=True)\n",
    "    idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,\n",
    "                                                          stratify=y_train_val, test_size=args['val_size'],\n",
    "                                                          random_state=2, shuffle=True)\n",
    "    dist_path = os.path.join(prefix, \"YelpChi_shortest_distance.pkl\")\n",
    "elif args['dataset'] == 'amazon':\n",
    "    index = list(range(3305, len(labels)))\n",
    "    idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels[3305:],\n",
    "                                                                    stratify=labels[3305:],\n",
    "                                                                    test_size=args['test_size'],\n",
    "                                                                    random_state=2, shuffle=True)\n",
    "    idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,\n",
    "                                                          stratify=y_train_val, test_size=args['val_size'],\n",
    "                                                          random_state=2, shuffle=True)\n",
    "    dist_path = os.path.join(prefix, \"Amazon_shortest_distance.pkl\")\n",
    "\n",
    "with open(dist_path, 'rb') as f:\n",
    "    dist_data = pickle.load(f)\n",
    "    dist_matrix = torch.tensor(dist_data['dist_matrix']).to(device)\n",
    "\n",
    "# 准备特征数据\n",
    "feat_data = torch.tensor(feat_data).float()\n",
    "# 特征归一化\n",
    "scaler = MinMaxScaler()\n",
    "feat_data = torch.tensor(scaler.fit_transform(feat_data)).float().to(device)\n",
    "\n",
    "# 初始化模型\n",
    "bsne_model = BSNE_Transformer(\n",
    "    in_feat=feat_data.shape[1],\n",
    "    out_feat=2,\n",
    "    relation_nums=len(edge_indexs),\n",
    "    d_model=64,\n",
    "    nhead=args['num_heads'],\n",
    "    num_layers=args['num_layers'],\n",
    "    dim_feedforward=256,\n",
    "    drop_rate=args['drop_rate']\n",
    ").to(device)\n",
    "\n",
    "# 将边索引转移到设备\n",
    "for edge_index in edge_indexs:\n",
    "    edge_index[0] = edge_index[0].to(device)\n",
    "    edge_index[1] = [tensor.to(device) for tensor in edge_index[1]]\n",
    "\n",
    "all_local_losses = []      # 每个epoch的batch本地损失列表\n",
    "all_global_losses = []     # 每个epoch的batch全局损失列表\n",
    "all_bsne_losses = []       # 每个epoch的总BSNE损失\n",
    "epoch_avg_local = []       # 每个epoch的平均本地损失\n",
    "epoch_avg_global = []      # 每个epoch的平均全局损失\n",
    "    \n",
    "    \n",
    "print(\"\\n=== Starting Pretraining ===\")\n",
    "\n",
    "bsne_model.classifier.requires_grad_(False)\n",
    "optimizer = torch.optim.AdamW(\n",
    "    filter(lambda p: p.requires_grad, bsne_model.parameters()),\n",
    "    lr=args['pretrain_lr'],\n",
    "    weight_decay=5e-5\n",
    ")\n",
    "pretrain_best_loss = float('inf')\n",
    "pretrain_no_improve = 0\n",
    "pretrain_early_stop = False\n",
    "\n",
    "\n",
    "loss_threshold = 0.05\n",
    "sample_size = 50\n",
    "max_steps_per_center = 200  # 防止死循环\n",
    "max_epochs = 100\n",
    "center_indices = list(range(feat_data.shape[0]))\n",
    "# 在每轮epoch前随机打乱中心点顺序\n",
    "rd.shuffle(center_indices)\n",
    "\n",
    "# 限制训练的中心点数量\n",
    "center_indices = center_indices[:max_epochs]\n",
    "\n",
    "\n",
    "for epoch, center_idx in enumerate(center_indices):\n",
    "    print(f\"\\n=== Pretraining Epoch {epoch} (Center Node: {center_idx}) ===\")\n",
    "    step = 0\n",
    "    \n",
    "    \n",
    "    while True:\n",
    "        bsne_model.train()\n",
    "        optimizer.zero_grad()\n",
    "        \n",
    "        total_loss = 0.0  # 每个step都要重置\n",
    "        eps = 1e-10\n",
    "        \n",
    "        \n",
    "        # 构建Bp子图\n",
    "        dist_row = dist_matrix[center_idx].cpu().numpy()\n",
    "        probs = np.power(10.0, -dist_row)\n",
    "        probs[center_idx] = 0\n",
    "        probs /= probs.sum()\n",
    "        available_nodes = len(dist_row) - 1\n",
    "        actual_sample_size = min(sample_size, available_nodes)\n",
    "        if actual_sample_size > 0:\n",
    "            neighbors = np.random.choice(len(dist_row), size=actual_sample_size, p=probs, replace=False)\n",
    "            bp_nodes = [center_idx] + neighbors.tolist()\n",
    "        else:\n",
    "            bp_nodes = [center_idx]\n",
    "\n",
    "        # 构建Bu子图\n",
    "        if actual_sample_size > 0:\n",
    "            neighbors = np.random.choice(len(dist_row), size=actual_sample_size, replace=False)\n",
    "            bu_nodes = [center_idx] + neighbors.tolist()\n",
    "        else:\n",
    "            bu_nodes = [center_idx]\n",
    "        \n",
    "        # 计算Bp子图中所有节点的特征\n",
    "        bp_node_features = []\n",
    "        for node_idx in bp_nodes:\n",
    "            node_subgraph = create_node_subgraph(node_idx, feat_data, edge_indexs, device)\n",
    "            _, node_feature = bsne_model([node_subgraph])\n",
    "            bp_node_features.append(node_feature.squeeze(0))\n",
    "        bp_features = torch.stack(bp_node_features)\n",
    "        \n",
    "        # 计算Bu子图中所有节点的特征\n",
    "        bu_node_features = []\n",
    "        for node_idx in bu_nodes:\n",
    "            node_subgraph = create_node_subgraph(node_idx, feat_data, edge_indexs, device)\n",
    "            _, node_feature = bsne_model([node_subgraph])\n",
    "            bu_node_features.append(node_feature.squeeze(0))\n",
    "        bu_features = torch.stack(bu_node_features)\n",
    "        \n",
    "        # 计算loss\n",
    "        center_feature = bp_features[0]  # 中心节点是第一个\n",
    "\n",
    "        # 仅获取Bp子图中的其他节点（排除中心节点）\n",
    "        other_bp_indices = bp_nodes[1:]\n",
    "        other_bp_features = bp_features[1:]\n",
    "        \n",
    "        with torch.no_grad():\n",
    "            # 计算中心节点到其他Bp节点的原始距离\n",
    "            orig_dists_bp = dist_matrix[center_idx][other_bp_indices].cpu().numpy()\n",
    "\n",
    "            # 计算P向量（基于节点路径距离）\n",
    "            P = np.exp(-orig_dists_bp)\n",
    "            P_sum = P.sum()\n",
    "            P = P / (P_sum + eps)\n",
    "            P = torch.tensor(P, device=device)\n",
    "\n",
    "        # 计算Q向量（基于特征距离）\n",
    "#         feat_dists_bp = torch.cdist(center_feature.unsqueeze(0), other_bp_features).squeeze(0)\n",
    "# #         feat_dists_bp.requires_grad_(True)\n",
    "#         Q = torch.softmax(-feat_dists_bp, dim=0)\n",
    "        \n",
    "#         log_ratio = torch.log(P + eps) - torch.log(Q + eps)\n",
    "        # 原局部loss计算公式\n",
    "#         log_ratio = torch.log(((P + eps)/(Q + eps)))\n",
    "#         loss_local = log_ratio.mean() \n",
    "\n",
    "        # 计算Q向量（基于特征距离）\n",
    "        center_norm = F.normalize(center_feature, dim=0)\n",
    "        other_norm = F.normalize(other_bp_features, dim=1)\n",
    "        feat_dists_bp = center_norm @ other_norm.t()\n",
    "        Q = torch.softmax(feat_dists_bp.squeeze(0), dim=0)\n",
    "\n",
    "        M = 0.5 * (P + Q)\n",
    "        loss_local = 0.5 * (P * torch.log(P/M) + Q * torch.log(Q/M)).sum()\n",
    "\n",
    "        # 获取全局距离并计算概率\n",
    "        all_dists = dist_matrix[center_idx].cpu().numpy()\n",
    "        unnorm_probs_global = np.exp(-all_dists)\n",
    "        unnorm_probs_global[center_idx] = 0  # 排除自身\n",
    "\n",
    "        # 计算全局归一化常数\n",
    "        Z_global = unnorm_probs_global.sum() + eps\n",
    "\n",
    "        # 计算B_p节点的全局概率和\n",
    "        bp_global_prob_sum = unnorm_probs_global[other_bp_indices].sum() / Z_global\n",
    "\n",
    "        # 计算k_Bp\n",
    "        N = dist_matrix.shape[0]\n",
    "        k_Bp = bp_global_prob_sum * (N / len(other_bp_indices))\n",
    "\n",
    "        bu_features_ = bu_features[1:]  # 排除中心节点\n",
    "#         feat_dists_bu = torch.cdist(center_feature.unsqueeze(0), bu_features_).squeeze(0)\n",
    "        feat_dists_bu = F.normalize(center_feature, dim=0) @ F.normalize(bu_features_, dim=1).t()\n",
    "\n",
    "        sum_e_bu = torch.exp(-feat_dists_bu).sum()\n",
    "        sum_e_bp = torch.exp(-feat_dists_bp).sum()\n",
    "        global_ratio = k_Bp * (sum_e_bu / (sum_e_bp + eps))\n",
    "        \n",
    "        loss_global = torch.log(global_ratio.clamp(min=eps, max=1e10))\n",
    "\n",
    "        # = 5. 损失合并 =\n",
    "        total_loss += loss_local + loss_global\n",
    "#         total_loss += loss_local\n",
    "    \n",
    "        total_loss.backward()\n",
    "        optimizer.step()\n",
    "        \n",
    "        \n",
    "#         for name, parms in bsne_model.named_parameters():\n",
    "#             grad_value = parms.grad\n",
    "#             if grad_value is not None:\n",
    "#                 grad_mean = torch.mean(grad_value)\n",
    "#             else:\n",
    "#                 grad_mean = \"No gradient\"\n",
    "\n",
    "#             print(f'-->name: {name} '\n",
    "#                   f'-->grad_requirs: {parms.requires_grad} '\n",
    "#                   f'--weight: {torch.mean(parms.data).item():.6f} '\n",
    "#                   f'-->grad_value: {grad_mean}')\n",
    "        \n",
    "#         print(\"P:\",P)\n",
    "#         print(\"Q:\",Q)\n",
    "        print(f\"Step {step}: BSNE_Loss={total_loss.item():.4f},local_loss: {loss_local.item()},global_loss: {loss_global.item()}\")\n",
    "        step += 1\n",
    "        \n",
    "        if total_loss.item() < loss_threshold or step >= max_steps_per_center:\n",
    "            print(f\"Center node {center_idx} finished at step {step} with loss {total_loss.item():.4f}\")\n",
    "            break\n",
    "\n",
    "print(\"\\n=== Starting Fine-tuning ===\")\n",
    "bsne_model.classifier.requires_grad_(True)\n",
    "optimizer = torch.optim.AdamW(\n",
    "    bsne_model.parameters(),\n",
    "    lr=args['finetune_lr'],\n",
    "    weight_decay=5e-5\n",
    ")\n",
    "scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(\n",
    "    optimizer, mode='max', factor=0.5, patience=10, verbose=True\n",
    ")\n",
    "\n",
    "best_val_auc = 0.0\n",
    "best_model_state = None\n",
    "train_pos, train_neg = pos_neg_split(idx_train, y_train)\n",
    "\n",
    "no_improve_epochs = 0\n",
    "early_stop = False\n",
    "\n",
    "for epoch in range(args['num_epochs']):\n",
    "    if early_stop:\n",
    "        break\n",
    "\n",
    "    bsne_model.train()\n",
    "    total_loss = 0.0\n",
    "\n",
    "    # 采样中心节点\n",
    "    batch_centers = rd.sample(train_pos + train_neg, args['batch_size'])\n",
    "    subgraph_data = []\n",
    "\n",
    "    for xi in batch_centers:\n",
    "        subgraph = create_node_subgraph(xi, feat_data, edge_indexs, device)\n",
    "        #         print(len(subgraph['features']))\n",
    "        subgraph_data.append(subgraph)\n",
    "\n",
    "    optimizer.zero_grad()\n",
    "\n",
    "    center_logits, _ = bsne_model(subgraph_data)  # [B, 2]\n",
    "\n",
    "    # 提取中心节点标签\n",
    "    labels_center = torch.tensor([labels[xi] for xi in batch_centers]).to(device).long()\n",
    "\n",
    "    # 计算分类损失\n",
    "    cls_loss = F.nll_loss(center_logits, labels_center)\n",
    "\n",
    "    cls_loss.backward()\n",
    "    optimizer.step()\n",
    "\n",
    "    total_loss += cls_loss.item()\n",
    "\n",
    "    avg_loss = total_loss / args['batch_size']\n",
    "    writer.add_scalar('FineTune/Train_Loss', avg_loss, epoch)\n",
    "\n",
    "    if epoch % 5 == 0:\n",
    "        val_auc, val_ap, val_f1, val_g_mean = test(idx_val, y_val, bsne_model, feat_data, edge_indexs, device)\n",
    "\n",
    "        writer.add_scalar('Validation/AUC', val_auc, epoch)\n",
    "        writer.add_scalar('Validation/F1', val_f1, epoch)\n",
    "        writer.add_scalar('Validation/GMean', val_g_mean, epoch)\n",
    "\n",
    "        print(f'Epoch: {epoch:03d} | Loss: {avg_loss:.4f} | Val AUC: {val_auc:.4f} | Val F1: {val_f1:.4f}')\n",
    "\n",
    "        scheduler.step(val_auc)\n",
    "\n",
    "        if val_auc > best_val_auc:\n",
    "            best_val_auc = val_auc\n",
    "            no_improve_epochs = 0\n",
    "            best_model_state = copy.deepcopy(bsne_model.state_dict())\n",
    "        else:\n",
    "            no_improve_epochs += 1\n",
    "\n",
    "        if no_improve_epochs >= args['patience']:\n",
    "            print(f\"Early stopping at epoch {epoch}\")\n",
    "            early_stop = True\n",
    "\n",
    "# 加载最佳模型\n",
    "bsne_model.load_state_dict(best_model_state)\n",
    "\n",
    "# 最终测试\n",
    "test_auc, test_ap, test_f1, test_g_mean = test(idx_test, y_test, bsne_model, feat_data, edge_indexs, device)\n",
    "print(f'\\n=== Final Test Results ===')\n",
    "print(f'Test AUC: {test_auc:.4f} | Test AP: {test_ap:.4f} | Test F1: {test_f1:.4f} | G-mean: {test_g_mean:.4f}')\n",
    "writer.close()\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a6868a76",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 获取全局距离并计算概率\n",
    "        all_dists = dist_matrix[center_idx].cpu().numpy()\n",
    "        unnorm_probs_global = np.exp(-all_dists)\n",
    "        unnorm_probs_global[center_idx] = 0  # 排除自身\n",
    "\n",
    "        # 计算全局归一化常数\n",
    "        Z_global = unnorm_probs_global.sum() + eps\n",
    "\n",
    "        # 计算B_p节点的全局概率和\n",
    "        bp_global_prob_sum = unnorm_probs_global[other_bp_indices].sum() / Z_global\n",
    "\n",
    "        # 计算k_Bp\n",
    "        N = dist_matrix.shape[0]\n",
    "        k_Bp = bp_global_prob_sum * (N / len(other_bp_indices))\n",
    "\n",
    "        bu_features_ = bu_features[1:]  # 排除中心节点\n",
    "        feat_dists_bu = torch.cdist(center_feature.unsqueeze(0), bu_features_).squeeze(0)\n",
    "\n",
    "        sum_e_bu = torch.exp(-feat_dists_bu).sum()\n",
    "        sum_e_bp = torch.exp(-feat_dists_bp).sum()\n",
    "        global_ratio = k_Bp * (sum_e_bu / (sum_e_bp + eps))\n",
    "        \n",
    "        loss_global = torch.log(global_ratio.clamp(min=eps, max=1e10))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "46aa6c5c",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "1848.3690428733298"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "Z_global"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "f902614e",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0.006302957476690684"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "bp_global_prob_sum"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "8f31ef9e",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "1.5056504820318706"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "k_Bp"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "0f945e85",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor(2.2774e-07, device='cuda:0', grad_fn=<SumBackward0>)"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "sum_e_bu"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "2b0477b4",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor(31.9611, device='cuda:0', grad_fn=<SumBackward0>)"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "sum_e_bp"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "9dc56243",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([ 0.5417,  0.2970,  0.5484,  0.5653,  0.3658,  0.3728,  0.5375,  0.5428,\n",
       "         0.3028,  0.5126,  0.5093,  0.4906,  0.4820,  0.4191,  0.4850,  0.5435,\n",
       "         0.4071,  0.5824,  0.4072,  0.4809,  0.6146,  0.1838,  0.5535, -0.0170,\n",
       "         0.3791,  0.1149,  0.5958,  0.1898,  0.5828,  0.4661,  0.5097,  0.3403,\n",
       "         0.3679,  0.4854,  0.5950,  0.5366,  0.5418,  0.5855,  0.4832,  0.4730,\n",
       "         0.5091,  0.4791,  0.5785,  0.5845,  0.4667,  0.1149,  0.5454,  0.5531,\n",
       "         0.5720,  0.5170], device='cuda:0', grad_fn=<SqueezeBackward4>)"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "feat_dists_bp"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "d4463cd2",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([22.2667, 18.1047, 20.3245, 18.8467, 19.0224, 21.9243, 25.8503, 22.7061,\n",
       "        24.3284, 21.3644, 18.4847, 23.2717, 19.4243, 22.4382, 20.1043, 18.1280,\n",
       "        20.8590, 19.6308, 18.4293, 17.0941, 23.5082, 25.6544, 23.0667, 22.2153,\n",
       "        19.0498, 22.2350, 20.5153, 29.6577, 26.0947, 17.8830, 17.5824, 25.3863,\n",
       "        25.7024, 19.6989, 19.9840, 19.6475, 18.9952, 19.1879, 20.0183, 20.8558,\n",
       "        19.8145, 18.8797, 24.4289, 19.9873, 19.8319, 19.6016, 26.5414, 18.4759,\n",
       "        18.0288, 18.4012], device='cuda:0', grad_fn=<SqueezeBackward1>)"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "feat_dists_bu"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7f367d1c",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7b57b686",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "145e1bca",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "cuda\n",
      "loading data...\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[31m---------------------------------------------------------------------------\u001b[39m",
      "\u001b[31mKeyboardInterrupt\u001b[39m                         Traceback (most recent call last)",
      "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[5]\u001b[39m\u001b[32m, line 37\u001b[39m\n\u001b[32m     34\u001b[39m \u001b[38;5;28mprint\u001b[39m(\u001b[33m'\u001b[39m\u001b[33mloading data...\u001b[39m\u001b[33m'\u001b[39m)\n\u001b[32m     35\u001b[39m prefix = \u001b[33m\"\u001b[39m\u001b[33m../../data/\u001b[39m\u001b[33m\"\u001b[39m\n\u001b[32m---> \u001b[39m\u001b[32m37\u001b[39m edge_indexs, feat_data, labels = \u001b[43mload_data\u001b[49m\u001b[43m(\u001b[49m\u001b[43margs\u001b[49m\u001b[43m[\u001b[49m\u001b[33;43m'\u001b[39;49m\u001b[33;43mdataset\u001b[39;49m\u001b[33;43m'\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43margs\u001b[49m\u001b[43m[\u001b[49m\u001b[33;43m'\u001b[39;49m\u001b[33;43mlayers_tree\u001b[39;49m\u001b[33;43m'\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mprefix\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m     39\u001b[39m np.random.seed(args[\u001b[33m'\u001b[39m\u001b[33mseed\u001b[39m\u001b[33m'\u001b[39m])\n\u001b[32m     40\u001b[39m rd.seed(args[\u001b[33m'\u001b[39m\u001b[33mseed\u001b[39m\u001b[33m'\u001b[39m])\n",
      "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[1]\u001b[39m\u001b[32m, line 188\u001b[39m, in \u001b[36mload_data\u001b[39m\u001b[34m(data, k, prefix)\u001b[39m\n\u001b[32m    186\u001b[39m         tree = pickle.load(file)\n\u001b[32m    187\u001b[39m     file.close()\n\u001b[32m--> \u001b[39m\u001b[32m188\u001b[39m     relation2_tree.append(\u001b[43mnumpy_array_to_edge_index\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtree\u001b[49m\u001b[43m)\u001b[49m)\n\u001b[32m    189\u001b[39m \u001b[38;5;28;01mwith\u001b[39;00m \u001b[38;5;28mopen\u001b[39m(pickle_file[\u001b[33m'\u001b[39m\u001b[33mamz_uvu\u001b[39m\u001b[33m'\u001b[39m], \u001b[33m'\u001b[39m\u001b[33mrb\u001b[39m\u001b[33m'\u001b[39m) \u001b[38;5;28;01mas\u001b[39;00m file:\n\u001b[32m    190\u001b[39m     relation3 = pickle.load(file)\n",
      "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[1]\u001b[39m\u001b[32m, line 111\u001b[39m, in \u001b[36mnumpy_array_to_edge_index\u001b[39m\u001b[34m(np_array)\u001b[39m\n\u001b[32m    109\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34mnumpy_array_to_edge_index\u001b[39m(np_array):\n\u001b[32m    110\u001b[39m     \u001b[38;5;28;01massert\u001b[39;00m np_array.ndim == \u001b[32m2\u001b[39m \u001b[38;5;129;01mand\u001b[39;00m np_array.shape[\u001b[32m0\u001b[39m] == np_array.shape[\u001b[32m1\u001b[39m], \u001b[33m\"\u001b[39m\u001b[33mInput must be a square matrix.\u001b[39m\u001b[33m\"\u001b[39m\n\u001b[32m--> \u001b[39m\u001b[32m111\u001b[39m     rows, cols = \u001b[43mnp\u001b[49m\u001b[43m.\u001b[49m\u001b[43mnonzero\u001b[49m\u001b[43m(\u001b[49m\u001b[43mnp_array\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m    112\u001b[39m     edge_index = np.vstack((rows, cols))\n\u001b[32m    113\u001b[39m     edge_index_tensor = torch.from_numpy(edge_index).long()\n",
      "\u001b[36mFile \u001b[39m\u001b[32m~/run/LML/anaconda3/envs/tsne/lib/python3.11/site-packages/numpy/core/fromnumeric.py:1973\u001b[39m, in \u001b[36mnonzero\u001b[39m\u001b[34m(a)\u001b[39m\n\u001b[32m   1881\u001b[39m \u001b[38;5;129m@array_function_dispatch\u001b[39m(_nonzero_dispatcher)\n\u001b[32m   1882\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34mnonzero\u001b[39m(a):\n\u001b[32m   1883\u001b[39m \u001b[38;5;250m    \u001b[39m\u001b[33;03m\"\"\"\u001b[39;00m\n\u001b[32m   1884\u001b[39m \u001b[33;03m    Return the indices of the elements that are non-zero.\u001b[39;00m\n\u001b[32m   1885\u001b[39m \n\u001b[32m   (...)\u001b[39m\u001b[32m   1971\u001b[39m \n\u001b[32m   1972\u001b[39m \u001b[33;03m    \"\"\"\u001b[39;00m\n\u001b[32m-> \u001b[39m\u001b[32m1973\u001b[39m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43m_wrapfunc\u001b[49m\u001b[43m(\u001b[49m\u001b[43ma\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[33;43m'\u001b[39;49m\u001b[33;43mnonzero\u001b[39;49m\u001b[33;43m'\u001b[39;49m\u001b[43m)\u001b[49m\n",
      "\u001b[36mFile \u001b[39m\u001b[32m~/run/LML/anaconda3/envs/tsne/lib/python3.11/site-packages/numpy/core/fromnumeric.py:59\u001b[39m, in \u001b[36m_wrapfunc\u001b[39m\u001b[34m(obj, method, *args, **kwds)\u001b[39m\n\u001b[32m     56\u001b[39m     \u001b[38;5;28;01mreturn\u001b[39;00m _wrapit(obj, method, *args, **kwds)\n\u001b[32m     58\u001b[39m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[32m---> \u001b[39m\u001b[32m59\u001b[39m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mbound\u001b[49m\u001b[43m(\u001b[49m\u001b[43m*\u001b[49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mkwds\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m     60\u001b[39m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mTypeError\u001b[39;00m:\n\u001b[32m     61\u001b[39m     \u001b[38;5;66;03m# A TypeError occurs if the object does have such a method in its\u001b[39;00m\n\u001b[32m     62\u001b[39m     \u001b[38;5;66;03m# class, but its signature is not identical to that of NumPy's. This\u001b[39;00m\n\u001b[32m   (...)\u001b[39m\u001b[32m     66\u001b[39m     \u001b[38;5;66;03m# Call _wrapit from within the except clause to ensure a potential\u001b[39;00m\n\u001b[32m     67\u001b[39m     \u001b[38;5;66;03m# exception has a traceback chain.\u001b[39;00m\n\u001b[32m     68\u001b[39m     \u001b[38;5;28;01mreturn\u001b[39;00m _wrapit(obj, method, *args, **kwds)\n",
      "\u001b[31mKeyboardInterrupt\u001b[39m: "
     ]
    }
   ],
   "source": [
    "# 进行完整测试（新）\n",
    "\n",
    "# 参数设置\n",
    "args = {\n",
    "    \"dataset\": \"amazon\",\n",
    "#     \"dataset\": \"yelp\",\n",
    "    \"batch_size\": 8,\n",
    "    \"sample_size\": 50,\n",
    "    \"weight_decay\": 0.00005,\n",
    "    \"emb_size\": 32,\n",
    "    \"pretrain_epochs\": 200,\n",
    "    \"pretrain_lr\": 0.001, #0.0001\n",
    "    \"finetune_lr\": 0.0005,\n",
    "    \"num_epochs\": 200,\n",
    "    \"pretrain_patience\": 20,\n",
    "    \"patience\": 30,\n",
    "    \"tsne_weight\": 0.3,\n",
    "    \"weight\": 0.6,\n",
    "    \"layers\": 7,\n",
    "    \"test_size\": 0.6,\n",
    "    \"val_size\": 0.5,\n",
    "    \"layers_tree\": 7,\n",
    "    \"seed\": 76,\n",
    "    \"num_heads\": 2,\n",
    "    \"drop_rate\": 0.5\n",
    "}\n",
    "\n",
    "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "print(device)\n",
    "\n",
    "timestamp = time.strftime(\"%Y%m%d-%H%M%S\")\n",
    "writer = SummaryWriter(f'runs/{args[\"dataset\"]}_{timestamp}')\n",
    "\n",
    "print('loading data...')\n",
    "prefix = \"../../data/\"\n",
    "\n",
    "edge_indexs, feat_data, labels = load_data(args['dataset'], args['layers_tree'], prefix)\n",
    "\n",
    "np.random.seed(args['seed'])\n",
    "rd.seed(args['seed'])\n",
    "\n",
    "if args['dataset'] == 'yelp':\n",
    "    index = list(range(len(labels)))\n",
    "    idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels, stratify=labels,\n",
    "                                                                    test_size=args['test_size'], random_state=2,\n",
    "                                                                    shuffle=True)\n",
    "    idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,\n",
    "                                                          stratify=y_train_val, test_size=args['val_size'],\n",
    "                                                          random_state=2, shuffle=True)\n",
    "    dist_path = os.path.join(prefix, \"YelpChi_shortest_distance.pkl\")\n",
    "elif args['dataset'] == 'amazon':\n",
    "    index = list(range(3305, len(labels)))\n",
    "    idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels[3305:],\n",
    "                                                                    stratify=labels[3305:],\n",
    "                                                                    test_size=args['test_size'],\n",
    "                                                                    random_state=2, shuffle=True)\n",
    "    idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,\n",
    "                                                          stratify=y_train_val, test_size=args['val_size'],\n",
    "                                                          random_state=2, shuffle=True)\n",
    "    dist_path = os.path.join(prefix, \"Amazon_shortest_distance.pkl\")\n",
    "\n",
    "with open(dist_path, 'rb') as f:\n",
    "    dist_data = pickle.load(f)\n",
    "    dist_matrix = torch.tensor(dist_data['dist_matrix']).to(device)\n",
    "\n",
    "# 准备特征数据\n",
    "feat_data = torch.tensor(feat_data).float()\n",
    "# 特征归一化\n",
    "scaler = MinMaxScaler()\n",
    "feat_data = torch.tensor(scaler.fit_transform(feat_data)).float().to(device)\n",
    "\n",
    "# 初始化模型\n",
    "bsne_model = BSNE_Transformer(\n",
    "    in_feat=feat_data.shape[1],\n",
    "    out_feat=2,\n",
    "    relation_nums=len(edge_indexs),\n",
    "    d_model=64,\n",
    "    nhead=args['num_heads'],\n",
    "    num_layers=3,\n",
    "    dim_feedforward=256,\n",
    "    drop_rate=args['drop_rate']\n",
    ").to(device)\n",
    "\n",
    "# 将边索引转移到设备\n",
    "for edge_index in edge_indexs:\n",
    "    edge_index[0] = edge_index[0].to(device)\n",
    "    edge_index[1] = [tensor.to(device) for tensor in edge_index[1]]\n",
    "\n",
    "all_local_losses = []      # 每个epoch的batch本地损失列表\n",
    "all_global_losses = []     # 每个epoch的batch全局损失列表\n",
    "all_bsne_losses = []       # 每个epoch的总BSNE损失\n",
    "epoch_avg_local = []       # 每个epoch的平均本地损失\n",
    "epoch_avg_global = []      # 每个epoch的平均全局损失\n",
    "    \n",
    "    \n",
    "print(\"\\n=== Starting Pretraining ===\")\n",
    "\n",
    "bsne_model.classifier.requires_grad_(False)\n",
    "optimizer = torch.optim.AdamW(\n",
    "    filter(lambda p: p.requires_grad, bsne_model.parameters()),\n",
    "    lr=args['pretrain_lr'],\n",
    "    weight_decay=5e-5\n",
    ")\n",
    "pretrain_best_loss = float('inf')\n",
    "pretrain_no_improve = 0\n",
    "pretrain_early_stop = False\n",
    "\n",
    "center_indices = list(range(feat_data.shape[0]))\n",
    "loss_threshold = 0.2\n",
    "sample_size = 50\n",
    "max_steps_per_center = 1000  # 防止死循环\n",
    "\n",
    "for epoch, center_idx in enumerate(center_indices):\n",
    "    print(f\"\\n=== Pretraining Epoch {epoch} (Center Node: {center_idx}) ===\")\n",
    "    step = 0\n",
    "    while True:\n",
    "        bsne_model.train()\n",
    "        optimizer.zero_grad()\n",
    "        \n",
    "        total_loss = 0.0  # 每个step都要重置\n",
    "        eps = 1e-10\n",
    "        \n",
    "        # 构建Bp子图\n",
    "        dist_row = dist_matrix[center_idx].cpu().numpy()\n",
    "        probs = np.power(10.0, -dist_row)\n",
    "        probs[center_idx] = 0\n",
    "        probs /= probs.sum()\n",
    "        available_nodes = len(dist_row) - 1\n",
    "        actual_sample_size = min(sample_size, available_nodes)\n",
    "        if actual_sample_size > 0:\n",
    "            neighbors = np.random.choice(len(dist_row), size=actual_sample_size, p=probs, replace=False)\n",
    "            bp_nodes = [center_idx] + neighbors.tolist()\n",
    "        else:\n",
    "            bp_nodes = [center_idx]\n",
    "        \n",
    "        # 构建Bu子图\n",
    "        if actual_sample_size > 0:\n",
    "            neighbors = np.random.choice(len(dist_row), size=actual_sample_size, replace=False)\n",
    "            bu_nodes = [center_idx] + neighbors.tolist()\n",
    "        else:\n",
    "            bu_nodes = [center_idx]\n",
    "        \n",
    "        # 计算Bp子图中所有节点的特征\n",
    "        bp_node_features = []\n",
    "        for node_idx in bp_nodes:\n",
    "            node_subgraph = create_node_subgraph(node_idx, feat_data, edge_indexs, device)\n",
    "            _, node_feature = bsne_model([node_subgraph])\n",
    "            bp_node_features.append(node_feature.squeeze(0))\n",
    "        bp_features = torch.stack(bp_node_features)\n",
    "        \n",
    "        # 计算Bu子图中所有节点的特征\n",
    "        bu_node_features = []\n",
    "        for node_idx in bu_nodes:\n",
    "            node_subgraph = create_node_subgraph(node_idx, feat_data, edge_indexs, device)\n",
    "            _, node_feature = bsne_model([node_subgraph])\n",
    "            bu_node_features.append(node_feature.squeeze(0))\n",
    "        bu_features = torch.stack(bu_node_features)\n",
    "        \n",
    "        # 计算loss\n",
    "        center_feature = bp_features[0]  # 中心节点是第一个\n",
    "\n",
    "        # 仅获取Bp子图中的其他节点（排除中心节点）\n",
    "        other_bp_indices = bp_nodes[1:]\n",
    "        other_bp_features = bp_features[1:]\n",
    "        \n",
    "        with torch.no_grad():\n",
    "            # 计算中心节点到其他Bp节点的原始距离\n",
    "            orig_dists_bp = dist_matrix[center_idx][other_bp_indices].cpu().numpy()\n",
    "\n",
    "            # 计算P向量（基于节点路径距离）\n",
    "            P = np.exp(-orig_dists_bp)\n",
    "            P_sum = P.sum()\n",
    "            P = P / (P_sum + eps)\n",
    "            P = torch.tensor(P, device=device)\n",
    "\n",
    "        # 计算Q向量（基于特征距离）\n",
    "        # 计算中心节点到其他Bp节点的特征距离\n",
    "        feat_dists_bp = torch.cdist(center_feature.unsqueeze(0), other_bp_features).squeeze(0)\n",
    "        feat_dists_bp.requires_grad_(True)\n",
    "\n",
    "        \n",
    "        Q = torch.softmax(-feat_dists_bp, dim=0)\n",
    "        \n",
    "#         log_ratio = torch.log(P + eps) - torch.log(Q + eps)\n",
    "        log_ratio = torch.log(((P + eps)/(Q + eps))**2)\n",
    "        loss_local = log_ratio.mean() \n",
    "\n",
    "        # 获取全局距离并计算概率\n",
    "        all_dists = dist_matrix[center_idx].cpu().numpy()\n",
    "        unnorm_probs_global = np.exp(-all_dists)\n",
    "        unnorm_probs_global[center_idx] = 0  # 排除自身\n",
    "\n",
    "        # 计算全局归一化常数\n",
    "        Z_global = unnorm_probs_global.sum() + eps\n",
    "\n",
    "        # 计算B_p节点的全局概率和\n",
    "        bp_global_prob_sum = unnorm_probs_global[other_bp_indices].sum() / Z_global\n",
    "\n",
    "        # 计算k_Bp\n",
    "        N = dist_matrix.shape[0]\n",
    "        k_Bp = bp_global_prob_sum * (N / len(other_bp_indices))\n",
    "\n",
    "        bu_features_ = bu_features[1:]  # 排除中心节点\n",
    "        feat_dists_bu = torch.cdist(center_feature.unsqueeze(0), bu_features_).squeeze(0)\n",
    "\n",
    "        sum_e_bu = torch.exp(-feat_dists_bu).sum()\n",
    "        sum_e_bp = torch.exp(-feat_dists_bp).sum()\n",
    "        global_ratio = k_Bp * (sum_e_bu / (sum_e_bp + eps))\n",
    "        \n",
    "        loss_global = torch.log(global_ratio.clamp(min=eps, max=1e10)**2)\n",
    "\n",
    "        # = 5. 损失合并 =\n",
    "        total_loss += loss_local + loss_global\n",
    "        \n",
    "        total_loss.backward()\n",
    "        optimizer.step()\n",
    "        \n",
    "        print(f\"Step {step}: BSNE_Loss={total_loss.item():.4f},local_loss: {loss_local.item()},global_loss: {loss_global.item()}\")\n",
    "        step += 1\n",
    "        \n",
    "        if total_loss.item() < loss_threshold or step >= max_steps_per_center:\n",
    "            print(f\"Center node {center_idx} finished at step {step} with loss {total_loss.item():.4f}\")\n",
    "            break\n",
    "\n",
    "print(\"\\n=== Starting Fine-tuning ===\")\n",
    "bsne_model.classifier.requires_grad_(True)\n",
    "optimizer = torch.optim.AdamW(\n",
    "    bsne_model.parameters(),\n",
    "    lr=args['finetune_lr'],\n",
    "    weight_decay=5e-5\n",
    ")\n",
    "scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(\n",
    "    optimizer, mode='max', factor=0.5, patience=10, verbose=True\n",
    ")\n",
    "\n",
    "best_val_auc = 0.0\n",
    "best_model_state = None\n",
    "train_pos, train_neg = pos_neg_split(idx_train, y_train)\n",
    "\n",
    "no_improve_epochs = 0\n",
    "early_stop = False\n",
    "\n",
    "for epoch in range(args['num_epochs']):\n",
    "    if early_stop:\n",
    "        break\n",
    "\n",
    "    bsne_model.train()\n",
    "    total_loss = 0.0\n",
    "\n",
    "    # 采样中心节点\n",
    "    batch_centers = rd.sample(train_pos + train_neg, args['batch_size'])\n",
    "    subgraph_data = []\n",
    "\n",
    "    for xi in batch_centers:\n",
    "        subgraph = create_node_subgraph(xi, feat_data, edge_indexs, device)\n",
    "        #         print(len(subgraph['features']))\n",
    "        subgraph_data.append(subgraph)\n",
    "\n",
    "    optimizer.zero_grad()\n",
    "\n",
    "    center_logits, _ = bsne_model(subgraph_data)  # [B, 2]\n",
    "\n",
    "    # 提取中心节点标签\n",
    "    labels_center = torch.tensor([labels[xi] for xi in batch_centers]).to(device).long()\n",
    "\n",
    "    # 计算分类损失\n",
    "    cls_loss = F.nll_loss(center_logits, labels_center)\n",
    "\n",
    "    cls_loss.backward()\n",
    "    optimizer.step()\n",
    "\n",
    "    total_loss += cls_loss.item()\n",
    "\n",
    "    avg_loss = total_loss / args['batch_size']\n",
    "    writer.add_scalar('FineTune/Train_Loss', avg_loss, epoch)\n",
    "\n",
    "    if epoch % 5 == 0:\n",
    "        val_auc, val_ap, val_f1, val_g_mean = test(idx_val, y_val, bsne_model, feat_data, edge_indexs, device)\n",
    "\n",
    "        writer.add_scalar('Validation/AUC', val_auc, epoch)\n",
    "        writer.add_scalar('Validation/F1', val_f1, epoch)\n",
    "        writer.add_scalar('Validation/GMean', val_g_mean, epoch)\n",
    "\n",
    "        print(f'Epoch: {epoch:03d} | Loss: {avg_loss:.4f} | Val AUC: {val_auc:.4f} | Val F1: {val_f1:.4f}')\n",
    "\n",
    "        scheduler.step(val_auc)\n",
    "\n",
    "        if val_auc > best_val_auc:\n",
    "            best_val_auc = val_auc\n",
    "            no_improve_epochs = 0\n",
    "            best_model_state = copy.deepcopy(bsne_model.state_dict())\n",
    "        else:\n",
    "            no_improve_epochs += 1\n",
    "\n",
    "        if no_improve_epochs >= args['patience']:\n",
    "            print(f\"Early stopping at epoch {epoch}\")\n",
    "            early_stop = True\n",
    "\n",
    "# 加载最佳模型\n",
    "bsne_model.load_state_dict(best_model_state)\n",
    "\n",
    "# 最终测试\n",
    "test_auc, test_ap, test_f1, test_g_mean = test(idx_test, y_test, bsne_model, feat_data, edge_indexs, device)\n",
    "print(f'\\n=== Final Test Results ===')\n",
    "print(f'Test AUC: {test_auc:.4f} | Test AP: {test_ap:.4f} | Test F1: {test_f1:.4f} | G-mean: {test_g_mean:.4f}')\n",
    "writer.close()\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "bb0398ee",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "cuda\n",
      "loading data...\n",
      "\n",
      "=== Starting Pretraining ===\n",
      "\n",
      "=== Pretraining Epoch 0 (Center Node: 9507) ===\n",
      "Step 0: BSNE_Loss=1.0846,local_loss: 0.3155012185058245,global_loss: 0.7691054940223694\n",
      "Step 1: BSNE_Loss=1.0081,local_loss: 0.3898062660524886,global_loss: 0.6182875037193298\n",
      "Step 2: BSNE_Loss=1.1729,local_loss: 0.31663119358121905,global_loss: 0.8562723398208618\n",
      "Step 3: BSNE_Loss=1.0455,local_loss: 0.3622449257005354,global_loss: 0.6832051873207092\n",
      "Step 4: BSNE_Loss=1.1170,local_loss: 0.39993292265325575,global_loss: 0.7170361280441284\n",
      "Step 5: BSNE_Loss=0.8825,local_loss: 0.6452789747846152,global_loss: 0.23722884058952332\n",
      "Step 6: BSNE_Loss=0.7278,local_loss: 0.6487915979046105,global_loss: 0.07896597683429718\n",
      "Step 7: BSNE_Loss=1.3020,local_loss: 0.305373479652142,global_loss: 0.9966071844100952\n",
      "Step 8: BSNE_Loss=0.9543,local_loss: 0.17966511342148445,global_loss: 0.7746770977973938\n",
      "Step 9: BSNE_Loss=1.3143,local_loss: 0.276812254579184,global_loss: 1.0374668836593628\n",
      "Step 10: BSNE_Loss=0.9367,local_loss: 0.21005017499105813,global_loss: 0.7266564965248108\n",
      "Step 11: BSNE_Loss=0.8150,local_loss: 0.2018595507060228,global_loss: 0.6131269335746765\n",
      "Step 12: BSNE_Loss=1.4542,local_loss: 0.1361999567082159,global_loss: 1.3179943561553955\n",
      "Step 13: BSNE_Loss=0.9644,local_loss: 0.3349478352861951,global_loss: 0.6294364929199219\n",
      "Step 14: BSNE_Loss=1.0375,local_loss: 0.31062740581435977,global_loss: 0.7269014120101929\n",
      "Step 15: BSNE_Loss=1.3000,local_loss: 0.445582546286663,global_loss: 0.8544555902481079\n",
      "Step 16: BSNE_Loss=1.0649,local_loss: 0.13093212744175928,global_loss: 0.9339261651039124\n",
      "Step 17: BSNE_Loss=0.6930,local_loss: 0.28846767707091564,global_loss: 0.4045794606208801\n",
      "Step 18: BSNE_Loss=1.0937,local_loss: 0.3387495432728013,global_loss: 0.7549154758453369\n",
      "Step 19: BSNE_Loss=1.3165,local_loss: 0.2705165653283148,global_loss: 1.0459799766540527\n",
      "Step 20: BSNE_Loss=1.2282,local_loss: 0.5312832267432961,global_loss: 0.6969660520553589\n",
      "Step 21: BSNE_Loss=1.3127,local_loss: 0.30157267604810395,global_loss: 1.0111234188079834\n",
      "Step 22: BSNE_Loss=1.3423,local_loss: 0.19791935108347133,global_loss: 1.1444107294082642\n",
      "Step 23: BSNE_Loss=0.9555,local_loss: 0.5669067253159328,global_loss: 0.38863852620124817\n",
      "Step 24: BSNE_Loss=1.3283,local_loss: 0.19499292912960617,global_loss: 1.1333019733428955\n",
      "Step 25: BSNE_Loss=1.5076,local_loss: 0.2197938664325176,global_loss: 1.2877691984176636\n",
      "Step 26: BSNE_Loss=1.5827,local_loss: 0.3339408765253114,global_loss: 1.248745083808899\n",
      "Step 27: BSNE_Loss=0.9669,local_loss: 0.159060422593261,global_loss: 0.8078680634498596\n",
      "Step 28: BSNE_Loss=1.5333,local_loss: 0.22536822038215804,global_loss: 1.3079626560211182\n",
      "Step 29: BSNE_Loss=1.5786,local_loss: 0.46012942607053503,global_loss: 1.118471622467041\n",
      "Step 30: BSNE_Loss=1.3375,local_loss: 0.4046175391005614,global_loss: 0.9328776597976685\n",
      "Step 31: BSNE_Loss=1.3572,local_loss: 0.32771582023710705,global_loss: 1.0294461250305176\n",
      "Step 32: BSNE_Loss=1.1167,local_loss: 0.25594695487794744,global_loss: 0.8607988953590393\n",
      "Step 33: BSNE_Loss=0.9531,local_loss: 0.4277349576712169,global_loss: 0.5253806710243225\n",
      "Step 34: BSNE_Loss=1.5067,local_loss: 0.3035742960880534,global_loss: 1.2031738758087158\n",
      "Step 35: BSNE_Loss=0.9601,local_loss: 0.1970853448845678,global_loss: 0.7630457282066345\n",
      "Step 36: BSNE_Loss=1.0475,local_loss: 0.21917406700678702,global_loss: 0.82828688621521\n",
      "Step 37: BSNE_Loss=0.5542,local_loss: 0.3414665348006825,global_loss: 0.212741419672966\n",
      "Step 38: BSNE_Loss=1.4182,local_loss: 0.16252337662372157,global_loss: 1.2556627988815308\n",
      "Step 39: BSNE_Loss=0.8674,local_loss: 0.5083279596885861,global_loss: 0.35906076431274414\n",
      "Step 40: BSNE_Loss=0.7169,local_loss: 0.22494399908218476,global_loss: 0.4919252097606659\n",
      "Step 41: BSNE_Loss=1.2249,local_loss: 0.30050965599958146,global_loss: 0.9244369268417358\n",
      "Step 42: BSNE_Loss=1.0458,local_loss: 0.21498719444721218,global_loss: 0.8308388590812683\n",
      "Step 43: BSNE_Loss=1.0397,local_loss: 0.43897259743167966,global_loss: 0.6007727384567261\n",
      "Step 44: BSNE_Loss=1.1571,local_loss: 0.19188490501658195,global_loss: 0.965253472328186\n",
      "Step 45: BSNE_Loss=1.5960,local_loss: 0.24588731038290854,global_loss: 1.3501490354537964\n",
      "Step 46: BSNE_Loss=0.9358,local_loss: 0.47934599686017243,global_loss: 0.45646268129348755\n",
      "Step 47: BSNE_Loss=1.4274,local_loss: 0.2276885362168379,global_loss: 1.1997227668762207\n",
      "Step 48: BSNE_Loss=0.6599,local_loss: 0.49012681877090186,global_loss: 0.1697775274515152\n",
      "Step 49: BSNE_Loss=0.5211,local_loss: 0.26461316592574546,global_loss: 0.2564728260040283\n",
      "Step 50: BSNE_Loss=0.8425,local_loss: 0.17910836195245544,global_loss: 0.6633439660072327\n",
      "Step 51: BSNE_Loss=1.1560,local_loss: 0.27722429478032806,global_loss: 0.8787968158721924\n",
      "Step 52: BSNE_Loss=1.1714,local_loss: 0.48249374608756357,global_loss: 0.688896119594574\n",
      "Step 53: BSNE_Loss=0.8991,local_loss: 0.272490166526077,global_loss: 0.6265691518783569\n",
      "Step 54: BSNE_Loss=1.1526,local_loss: 0.6160001614825379,global_loss: 0.5366162061691284\n",
      "Step 55: BSNE_Loss=1.0979,local_loss: 0.47345827350027053,global_loss: 0.6244581937789917\n",
      "Step 56: BSNE_Loss=1.2490,local_loss: 0.37719478763489733,global_loss: 0.871820867061615\n",
      "Step 57: BSNE_Loss=1.3426,local_loss: 0.24988487876156054,global_loss: 1.092692494392395\n",
      "Step 58: BSNE_Loss=1.4772,local_loss: 0.2365466962309805,global_loss: 1.240675926208496\n",
      "Step 59: BSNE_Loss=1.0688,local_loss: 0.4250221260132655,global_loss: 0.6437960267066956\n",
      "Step 60: BSNE_Loss=1.0998,local_loss: 0.2618341590560937,global_loss: 0.8379654884338379\n",
      "Step 61: BSNE_Loss=1.0207,local_loss: 0.21213632565008916,global_loss: 0.8085278868675232\n",
      "Step 62: BSNE_Loss=1.2824,local_loss: 0.3465688415466097,global_loss: 0.935869038105011\n",
      "Step 63: BSNE_Loss=1.3646,local_loss: 0.12676521566419915,global_loss: 1.2378458976745605\n",
      "Step 64: BSNE_Loss=1.2053,local_loss: 0.266455345457339,global_loss: 0.9388843774795532\n",
      "Step 65: BSNE_Loss=0.9097,local_loss: 0.1827610496841415,global_loss: 0.726962149143219\n",
      "Step 66: BSNE_Loss=1.2799,local_loss: 0.19184542390928586,global_loss: 1.0880351066589355\n",
      "Step 67: BSNE_Loss=0.9888,local_loss: 0.25488833654703696,global_loss: 0.7338933944702148\n",
      "Step 68: BSNE_Loss=1.0557,local_loss: 0.2919971768181867,global_loss: 0.7637015581130981\n",
      "Step 69: BSNE_Loss=0.9709,local_loss: 0.39749612631798786,global_loss: 0.5733737349510193\n",
      "Step 70: BSNE_Loss=1.4578,local_loss: 0.25590071447571083,global_loss: 1.2018883228302002\n",
      "Step 71: BSNE_Loss=0.6168,local_loss: 0.29573401702057617,global_loss: 0.32109442353248596\n",
      "Step 72: BSNE_Loss=1.4708,local_loss: 0.394904542362473,global_loss: 1.0758466720581055\n",
      "Step 73: BSNE_Loss=1.0727,local_loss: 0.3939447890755794,global_loss: 0.6787390112876892\n",
      "Step 74: BSNE_Loss=1.4075,local_loss: 0.16920044444307117,global_loss: 1.2382795810699463\n",
      "Step 75: BSNE_Loss=1.0845,local_loss: 0.1015235718492181,global_loss: 0.9829659461975098\n",
      "Step 76: BSNE_Loss=0.9108,local_loss: 0.4480815782007595,global_loss: 0.4626722037792206\n",
      "Step 77: BSNE_Loss=1.3821,local_loss: 0.2666699689722828,global_loss: 1.115384578704834\n",
      "Step 78: BSNE_Loss=1.1989,local_loss: 0.15040089152213892,global_loss: 1.0485392808914185\n",
      "Step 79: BSNE_Loss=1.2396,local_loss: 0.23853196170497926,global_loss: 1.001020908355713\n",
      "Step 80: BSNE_Loss=1.2660,local_loss: 0.17189983367200257,global_loss: 1.0940816402435303\n",
      "Step 81: BSNE_Loss=1.0549,local_loss: 0.18791732164724204,global_loss: 0.8669955730438232\n",
      "Step 82: BSNE_Loss=0.7901,local_loss: 0.2088157366758131,global_loss: 0.5812839865684509\n",
      "Step 83: BSNE_Loss=0.9763,local_loss: 0.21877175708410007,global_loss: 0.7575410008430481\n",
      "Step 84: BSNE_Loss=1.2465,local_loss: 0.39884404961927394,global_loss: 0.847670316696167\n",
      "Step 85: BSNE_Loss=1.0955,local_loss: 0.245697417889984,global_loss: 0.8498021364212036\n",
      "Step 86: BSNE_Loss=1.4329,local_loss: 0.4424696360334663,global_loss: 0.9904268980026245\n",
      "Step 87: BSNE_Loss=1.3039,local_loss: 0.2840375538590949,global_loss: 1.0198508501052856\n",
      "Step 88: BSNE_Loss=0.8228,local_loss: 0.1437591080723938,global_loss: 0.6790028810501099\n",
      "Step 89: BSNE_Loss=0.9558,local_loss: 0.3085616240697196,global_loss: 0.6472593545913696\n",
      "Step 90: BSNE_Loss=0.6522,local_loss: 0.1402542576709789,global_loss: 0.5119717121124268\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 91: BSNE_Loss=1.3300,local_loss: 0.29994458344870806,global_loss: 1.0300853252410889\n",
      "Step 92: BSNE_Loss=0.6947,local_loss: 0.18593411631496487,global_loss: 0.5087465047836304\n",
      "Step 93: BSNE_Loss=0.8992,local_loss: 0.29948567788088915,global_loss: 0.5997148156166077\n",
      "Step 94: BSNE_Loss=1.1860,local_loss: 0.43615545129013406,global_loss: 0.7498376369476318\n",
      "Step 95: BSNE_Loss=0.8904,local_loss: 0.22452318758314757,global_loss: 0.6658961772918701\n",
      "Step 96: BSNE_Loss=1.1982,local_loss: 0.3216495503926013,global_loss: 0.8765686750411987\n",
      "Step 97: BSNE_Loss=1.4525,local_loss: 0.371797549089488,global_loss: 1.080706238746643\n",
      "Step 98: BSNE_Loss=1.0678,local_loss: 0.16456867188701302,global_loss: 0.9032140970230103\n",
      "Step 99: BSNE_Loss=0.9070,local_loss: 0.3129039718296917,global_loss: 0.5940685272216797\n",
      "Step 100: BSNE_Loss=1.1193,local_loss: 0.2891355171213547,global_loss: 0.8301844000816345\n",
      "Step 101: BSNE_Loss=0.8962,local_loss: 0.1894818993326457,global_loss: 0.7066707015037537\n",
      "Step 102: BSNE_Loss=1.3324,local_loss: 0.17659704409229612,global_loss: 1.155807614326477\n",
      "Step 103: BSNE_Loss=1.1587,local_loss: 0.23593871761238489,global_loss: 0.922712504863739\n",
      "Step 104: BSNE_Loss=0.6542,local_loss: 0.32365187879587154,global_loss: 0.3305042088031769\n",
      "Step 105: BSNE_Loss=1.5447,local_loss: 0.09763351744562675,global_loss: 1.4471102952957153\n",
      "Step 106: BSNE_Loss=0.9191,local_loss: 0.2518217036342131,global_loss: 0.6672977209091187\n",
      "Step 107: BSNE_Loss=2.0180,local_loss: 0.41519502473175446,global_loss: 1.6027765274047852\n",
      "Step 108: BSNE_Loss=0.8373,local_loss: 0.13264082110547665,global_loss: 0.7046194076538086\n",
      "Step 109: BSNE_Loss=1.1782,local_loss: 0.41628352608819585,global_loss: 0.7619550824165344\n",
      "Step 110: BSNE_Loss=1.1478,local_loss: 0.4776566294269708,global_loss: 0.67014479637146\n",
      "Step 111: BSNE_Loss=0.9524,local_loss: 0.4093619883076808,global_loss: 0.543077826499939\n",
      "Step 112: BSNE_Loss=0.9536,local_loss: 0.3087888973634052,global_loss: 0.6448553204536438\n",
      "Step 113: BSNE_Loss=1.0761,local_loss: 0.2673483360184429,global_loss: 0.8087552785873413\n",
      "Step 114: BSNE_Loss=1.1650,local_loss: 0.11983683747317228,global_loss: 1.045114278793335\n",
      "Step 115: BSNE_Loss=1.0975,local_loss: 0.49372233117295167,global_loss: 0.6037537455558777\n",
      "Step 116: BSNE_Loss=0.7599,local_loss: 0.20759289475316867,global_loss: 0.5523132681846619\n",
      "Step 117: BSNE_Loss=2.0390,local_loss: 0.33889335779795493,global_loss: 1.7001498937606812\n",
      "Step 118: BSNE_Loss=0.9692,local_loss: 0.33661034051049504,global_loss: 0.6326335668563843\n",
      "Step 119: BSNE_Loss=1.2479,local_loss: 0.2740078733925043,global_loss: 0.9739090800285339\n",
      "Step 120: BSNE_Loss=0.8935,local_loss: 0.14998824328860957,global_loss: 0.743537425994873\n",
      "Step 121: BSNE_Loss=0.9407,local_loss: 0.28519990496057046,global_loss: 0.6554740071296692\n",
      "Step 122: BSNE_Loss=1.0970,local_loss: 0.34941931683572236,global_loss: 0.7475905418395996\n",
      "Step 123: BSNE_Loss=0.9426,local_loss: 0.2387431841014982,global_loss: 0.7038148641586304\n",
      "Step 124: BSNE_Loss=1.3689,local_loss: 0.1469661254380351,global_loss: 1.2219358682632446\n",
      "Step 125: BSNE_Loss=1.1536,local_loss: 0.08757098540444665,global_loss: 1.0660264492034912\n",
      "Step 126: BSNE_Loss=1.6078,local_loss: 0.4340179304465268,global_loss: 1.173736572265625\n",
      "Step 127: BSNE_Loss=0.7479,local_loss: 0.2665609489803031,global_loss: 0.4813026785850525\n",
      "Step 128: BSNE_Loss=0.8701,local_loss: 0.23823727701333064,global_loss: 0.6318464279174805\n",
      "Step 133: BSNE_Loss=1.5861,local_loss: 0.2666835382420203,global_loss: 1.3193695545196533\n",
      "Step 134: BSNE_Loss=0.9964,local_loss: 0.19678911972705607,global_loss: 0.7995803952217102\n",
      "Step 135: BSNE_Loss=1.0606,local_loss: 0.23273781222546133,global_loss: 0.8278887271881104\n",
      "Step 136: BSNE_Loss=0.8527,local_loss: 0.27078394881864315,global_loss: 0.5819307565689087\n",
      "Step 137: BSNE_Loss=1.1459,local_loss: 0.3295615051424998,global_loss: 0.816380500793457\n",
      "Step 138: BSNE_Loss=1.0273,local_loss: 0.3679950128265911,global_loss: 0.6592605113983154\n",
      "Step 139: BSNE_Loss=0.7886,local_loss: 0.14119927028491489,global_loss: 0.6473909616470337\n",
      "Step 140: BSNE_Loss=1.6804,local_loss: 0.15704874785544098,global_loss: 1.5233303308486938\n",
      "Step 141: BSNE_Loss=1.2310,local_loss: 0.2110436593168075,global_loss: 1.0199278593063354\n",
      "Step 142: BSNE_Loss=1.7145,local_loss: 0.022022688965462497,global_loss: 1.6924939155578613\n",
      "Step 143: BSNE_Loss=1.3269,local_loss: 0.23514753202547273,global_loss: 1.0917118787765503\n",
      "Step 144: BSNE_Loss=0.9318,local_loss: 0.16216536956183028,global_loss: 0.7696666717529297\n",
      "Step 145: BSNE_Loss=0.9877,local_loss: 0.33506403773393145,global_loss: 0.6526838541030884\n",
      "Step 146: BSNE_Loss=1.4655,local_loss: -0.03001335944067159,global_loss: 1.4954770803451538\n",
      "Step 147: BSNE_Loss=1.5415,local_loss: 0.48554352237452675,global_loss: 1.0559786558151245\n",
      "Step 148: BSNE_Loss=1.2348,local_loss: 0.20726137121508095,global_loss: 1.0275723934173584\n",
      "Step 149: BSNE_Loss=1.1337,local_loss: 0.20617286001943236,global_loss: 0.9274857640266418\n",
      "Step 150: BSNE_Loss=0.6929,local_loss: 0.11818109718284558,global_loss: 0.5747339725494385\n",
      "Step 151: BSNE_Loss=1.0019,local_loss: 0.12151903816407603,global_loss: 0.8803825974464417\n",
      "Step 152: BSNE_Loss=1.0518,local_loss: 0.30183380781685637,global_loss: 0.7499855160713196\n",
      "Step 153: BSNE_Loss=1.2587,local_loss: 0.14446705932536452,global_loss: 1.114248275756836\n",
      "Step 154: BSNE_Loss=0.9369,local_loss: 0.2303546584590387,global_loss: 0.7065108418464661\n",
      "Step 155: BSNE_Loss=0.9780,local_loss: 0.17465117343599793,global_loss: 0.8033530712127686\n",
      "Step 156: BSNE_Loss=0.8957,local_loss: 0.3240280562354792,global_loss: 0.5716865062713623\n",
      "Step 157: BSNE_Loss=0.4620,local_loss: 0.2521300912640506,global_loss: 0.2098989635705948\n",
      "Step 158: BSNE_Loss=1.2984,local_loss: 0.17467111942277697,global_loss: 1.12374746799469\n",
      "Step 159: BSNE_Loss=0.7715,local_loss: 0.3796474196744958,global_loss: 0.3918417692184448\n",
      "Step 160: BSNE_Loss=0.9552,local_loss: 0.23434317828497192,global_loss: 0.7208425998687744\n",
      "Step 161: BSNE_Loss=0.7524,local_loss: 0.17888932582230893,global_loss: 0.5735498666763306\n",
      "Step 162: BSNE_Loss=1.6588,local_loss: 0.42770510047722277,global_loss: 1.2310956716537476\n",
      "Step 163: BSNE_Loss=1.4413,local_loss: 0.28287852351816606,global_loss: 1.158451795578003\n",
      "Step 164: BSNE_Loss=1.1415,local_loss: 0.3088582560863841,global_loss: 0.8326463103294373\n",
      "Step 165: BSNE_Loss=0.6829,local_loss: 0.3238146682343006,global_loss: 0.3590948283672333\n",
      "Step 166: BSNE_Loss=1.3886,local_loss: 0.4548410706069285,global_loss: 0.9337224960327148\n",
      "Step 167: BSNE_Loss=1.1071,local_loss: 0.1361558467553532,global_loss: 0.9709017276763916\n",
      "Step 168: BSNE_Loss=1.2202,local_loss: 0.24940137218969874,global_loss: 0.9708172082901001\n",
      "Step 169: BSNE_Loss=0.5826,local_loss: 0.11125784596158705,global_loss: 0.4713039696216583\n",
      "Step 170: BSNE_Loss=0.9478,local_loss: 0.2757500630825597,global_loss: 0.6720317602157593\n",
      "Step 171: BSNE_Loss=0.6522,local_loss: 0.3581203703017188,global_loss: 0.2941180467605591\n",
      "Step 172: BSNE_Loss=1.2996,local_loss: 0.40636020400212225,global_loss: 0.8932053446769714\n",
      "Step 173: BSNE_Loss=1.0737,local_loss: 0.3226899637231231,global_loss: 0.7510393261909485\n",
      "Step 174: BSNE_Loss=1.2500,local_loss: 0.21443333969204684,global_loss: 1.0356130599975586\n",
      "Step 175: BSNE_Loss=1.8131,local_loss: 0.23300140058264224,global_loss: 1.5800979137420654\n",
      "Step 176: BSNE_Loss=1.2720,local_loss: 0.22777714220545292,global_loss: 1.0441910028457642\n",
      "Step 177: BSNE_Loss=1.1758,local_loss: 0.12302603197861946,global_loss: 1.0527386665344238\n",
      "Step 178: BSNE_Loss=1.1344,local_loss: 0.12384485815524342,global_loss: 1.0105559825897217\n",
      "Step 179: BSNE_Loss=1.6872,local_loss: 0.27292627735254144,global_loss: 1.4142916202545166\n",
      "Step 180: BSNE_Loss=1.1829,local_loss: 0.1541157893466791,global_loss: 1.0287773609161377\n",
      "Step 181: BSNE_Loss=0.9743,local_loss: 0.1763879599319514,global_loss: 0.7978729009628296\n",
      "Step 182: BSNE_Loss=1.3197,local_loss: 0.22409669305665425,global_loss: 1.095618486404419\n",
      "Step 183: BSNE_Loss=1.3996,local_loss: 0.3568327545897991,global_loss: 1.042787790298462\n",
      "Step 184: BSNE_Loss=1.4458,local_loss: 0.22944028919316573,global_loss: 1.2163392305374146\n",
      "Step 185: BSNE_Loss=1.5615,local_loss: 0.35659067283654483,global_loss: 1.2048829793930054\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 186: BSNE_Loss=0.7080,local_loss: 0.28519723106419503,global_loss: 0.4228094518184662\n",
      "Step 187: BSNE_Loss=1.0663,local_loss: 0.17395566761288814,global_loss: 0.8923149108886719\n",
      "Step 188: BSNE_Loss=1.1497,local_loss: 0.12674636930939243,global_loss: 1.0229918956756592\n",
      "Step 189: BSNE_Loss=1.0074,local_loss: 0.19109768458386747,global_loss: 0.8162996768951416\n",
      "Step 190: BSNE_Loss=1.1100,local_loss: 0.4810684922799055,global_loss: 0.6289372444152832\n",
      "Step 191: BSNE_Loss=1.4786,local_loss: 0.09475627180068952,global_loss: 1.3838471174240112\n",
      "Step 192: BSNE_Loss=0.6058,local_loss: 0.2816292463851097,global_loss: 0.3241505026817322\n",
      "Step 193: BSNE_Loss=1.0055,local_loss: 0.23246612966279925,global_loss: 0.7729849815368652\n",
      "Step 194: BSNE_Loss=1.3877,local_loss: 0.25837321204473734,global_loss: 1.1293480396270752\n",
      "Step 195: BSNE_Loss=1.1126,local_loss: 0.28761414707258287,global_loss: 0.8249779939651489\n",
      "Step 196: BSNE_Loss=0.6365,local_loss: 0.34348965904355994,global_loss: 0.29305991530418396\n",
      "Step 197: BSNE_Loss=1.7008,local_loss: 0.13340289012463494,global_loss: 1.567375898361206\n",
      "Step 198: BSNE_Loss=1.2916,local_loss: 0.30296686539309853,global_loss: 0.9885969161987305\n",
      "Step 199: BSNE_Loss=1.1194,local_loss: 0.3950462162680672,global_loss: 0.7243829965591431\n",
      "Step 200: BSNE_Loss=1.5612,local_loss: 0.44570935111406146,global_loss: 1.115494966506958\n",
      "Step 201: BSNE_Loss=1.4419,local_loss: 0.2660939426553843,global_loss: 1.1757738590240479\n",
      "Step 202: BSNE_Loss=1.7810,local_loss: 0.19259536622694684,global_loss: 1.5883903503417969\n",
      "Step 203: BSNE_Loss=0.4132,local_loss: 0.2633998998744216,global_loss: 0.1498403400182724\n",
      "Step 204: BSNE_Loss=1.2447,local_loss: 0.02212378214038733,global_loss: 1.2225397825241089\n",
      "Step 205: BSNE_Loss=1.0929,local_loss: 0.17811391872041074,global_loss: 0.9147563576698303\n",
      "Step 206: BSNE_Loss=0.7983,local_loss: 0.3573842895555983,global_loss: 0.44093137979507446\n",
      "Step 207: BSNE_Loss=0.8483,local_loss: 0.22114051925218217,global_loss: 0.6271621584892273\n",
      "Step 208: BSNE_Loss=1.3562,local_loss: 0.23308052424358547,global_loss: 1.1231379508972168\n",
      "Step 209: BSNE_Loss=1.2151,local_loss: 0.26507690847284826,global_loss: 0.9500173926353455\n",
      "Step 210: BSNE_Loss=0.9402,local_loss: 0.2674920001763724,global_loss: 0.6727170348167419\n",
      "Step 211: BSNE_Loss=1.3747,local_loss: 0.1269004049626908,global_loss: 1.247749924659729\n",
      "Step 212: BSNE_Loss=0.3601,local_loss: 0.30808344665928694,global_loss: 0.05204807594418526\n",
      "Step 213: BSNE_Loss=1.1273,local_loss: 0.5105631413711421,global_loss: 0.6167208552360535\n",
      "Step 214: BSNE_Loss=1.1100,local_loss: 0.22416145856969935,global_loss: 0.8858250975608826\n",
      "Step 215: BSNE_Loss=1.0446,local_loss: 0.15378636902754145,global_loss: 0.8908116221427917\n",
      "Step 216: BSNE_Loss=0.5622,local_loss: 0.2279708451909086,global_loss: 0.33418914675712585\n",
      "Step 217: BSNE_Loss=1.3798,local_loss: 0.16453640444728748,global_loss: 1.2152502536773682\n",
      "Step 218: BSNE_Loss=0.8602,local_loss: 0.12538578813623524,global_loss: 0.7348038554191589\n",
      "Step 219: BSNE_Loss=0.8147,local_loss: 0.09277312103941021,global_loss: 0.7219702005386353\n",
      "Step 220: BSNE_Loss=1.3686,local_loss: 0.3297278956745733,global_loss: 1.0389220714569092\n",
      "Step 221: BSNE_Loss=1.0674,local_loss: 0.2547091745819112,global_loss: 0.8126500248908997\n",
      "Step 222: BSNE_Loss=0.6367,local_loss: 0.22904018672404572,global_loss: 0.4076242744922638\n",
      "Step 223: BSNE_Loss=1.2615,local_loss: 0.2592945662018111,global_loss: 1.002213478088379\n",
      "Step 224: BSNE_Loss=1.1650,local_loss: 0.07512321673537435,global_loss: 1.0898669958114624\n",
      "Step 225: BSNE_Loss=0.7850,local_loss: 0.3138154628649461,global_loss: 0.4711921811103821\n",
      "Step 226: BSNE_Loss=0.6236,local_loss: 0.13175370484645524,global_loss: 0.4918631315231323\n",
      "Step 227: BSNE_Loss=0.8256,local_loss: 0.23043104079739615,global_loss: 0.5952100157737732\n",
      "Step 228: BSNE_Loss=0.8682,local_loss: 0.1980104598720597,global_loss: 0.6702254414558411\n",
      "Step 229: BSNE_Loss=0.8084,local_loss: 0.3077645751440269,global_loss: 0.500647783279419\n",
      "Step 230: BSNE_Loss=0.9434,local_loss: 0.16106949034129278,global_loss: 0.7823094129562378\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[31m---------------------------------------------------------------------------\u001b[39m",
      "\u001b[31mKeyboardInterrupt\u001b[39m                         Traceback (most recent call last)",
      "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[8]\u001b[39m\u001b[32m, line 162\u001b[39m\n\u001b[32m    160\u001b[39m bu_node_features = []\n\u001b[32m    161\u001b[39m \u001b[38;5;28;01mfor\u001b[39;00m node_idx \u001b[38;5;129;01min\u001b[39;00m bu_nodes:\n\u001b[32m--> \u001b[39m\u001b[32m162\u001b[39m     node_subgraph = \u001b[43mcreate_node_subgraph\u001b[49m\u001b[43m(\u001b[49m\u001b[43mnode_idx\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mfeat_data\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43medge_indexs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdevice\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m    163\u001b[39m     _, node_feature = bsne_model([node_subgraph])\n\u001b[32m    164\u001b[39m     bu_node_features.append(node_feature.squeeze(\u001b[32m0\u001b[39m))\n",
      "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[1]\u001b[39m\u001b[32m, line 52\u001b[39m, in \u001b[36mcreate_node_subgraph\u001b[39m\u001b[34m(node_idx, feat_data, edge_indexs, device)\u001b[39m\n\u001b[32m     50\u001b[39m     edge_index = edge_indexs[rel_idx][\u001b[32m0\u001b[39m].cpu().numpy()\n\u001b[32m     51\u001b[39m     \u001b[38;5;66;03m# 找出以中心点为起点的边的终点\u001b[39;00m\n\u001b[32m---> \u001b[39m\u001b[32m52\u001b[39m     rel_neighbors = \u001b[43medge_index\u001b[49m\u001b[43m[\u001b[49m\u001b[32;43m1\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m[\u001b[49m\u001b[43medge_index\u001b[49m\u001b[43m[\u001b[49m\u001b[32;43m0\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m \u001b[49m\u001b[43m==\u001b[49m\u001b[43m \u001b[49m\u001b[43mnode_idx\u001b[49m\u001b[43m]\u001b[49m\u001b[43m.\u001b[49m\u001b[43mtolist\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m     53\u001b[39m     neighbors.update(rel_neighbors)\n\u001b[32m     55\u001b[39m \u001b[38;5;66;03m# 移除中心节点自身\u001b[39;00m\n",
      "\u001b[31mKeyboardInterrupt\u001b[39m: "
     ]
    }
   ],
   "source": [
    "# Run the full test (new variant): re-sample the subgraphs at every step,\n",
    "# and use the squared term in both loss components (see the loss section below).\n",
    "\n",
    "# Hyperparameter settings\n",
    "args = {\n",
    "    \"dataset\": \"amazon\",\n",
    "#     \"dataset\": \"yelp\",\n",
    "    \"batch_size\": 8,\n",
    "    \"sample_size\": 50,  # NOTE(review): shadowed by the module-level sample_size assigned later in this cell\n",
    "    \"weight_decay\": 0.00005,\n",
    "    \"emb_size\": 32,\n",
    "    \"pretrain_epochs\": 200,\n",
    "    \"pretrain_lr\": 0.0005, #0.0001\n",
    "    \"finetune_lr\": 0.0005,\n",
    "    \"num_epochs\": 200,\n",
    "    \"pretrain_patience\": 20,\n",
    "    \"patience\": 30,\n",
    "    \"tsne_weight\": 0.3,\n",
    "    \"weight\": 0.6,\n",
    "    \"layers\": 7,\n",
    "    \"test_size\": 0.6,\n",
    "    \"val_size\": 0.5,\n",
    "    \"layers_tree\": 7,\n",
    "    \"seed\": 76,\n",
    "    \"num_heads\": 2,\n",
    "    \"drop_rate\": 0.5\n",
    "}\n",
    "\n",
    "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "print(device)\n",
    "\n",
    "timestamp = time.strftime(\"%Y%m%d-%H%M%S\")\n",
    "writer = SummaryWriter(f'runs/{args[\"dataset\"]}_{timestamp}')\n",
    "\n",
    "print('loading data...')\n",
    "prefix = \"../../data/\"\n",
    "\n",
    "edge_indexs, feat_data, labels = load_data(args['dataset'], args['layers_tree'], prefix)\n",
    "\n",
    "np.random.seed(args['seed'])\n",
    "rd.seed(args['seed'])\n",
    "\n",
    "# Stratified train/val/test split; split fractions come from args.\n",
    "# NOTE(review): dist_path is only assigned in the two branches below — any other\n",
    "# value of args['dataset'] raises NameError at the open() call further down.\n",
    "if args['dataset'] == 'yelp':\n",
    "    index = list(range(len(labels)))\n",
    "    idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels, stratify=labels,\n",
    "                                                                    test_size=args['test_size'], random_state=2,\n",
    "                                                                    shuffle=True)\n",
    "    idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,\n",
    "                                                          stratify=y_train_val, test_size=args['val_size'],\n",
    "                                                          random_state=2, shuffle=True)\n",
    "    dist_path = os.path.join(prefix, \"YelpChi_shortest_distance.pkl\")\n",
    "elif args['dataset'] == 'amazon':\n",
    "    # Amazon: the first 3305 nodes are excluded from the split\n",
    "    # (presumably unlabeled — confirm against the dataset documentation).\n",
    "    index = list(range(3305, len(labels)))\n",
    "    idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels[3305:],\n",
    "                                                                    stratify=labels[3305:],\n",
    "                                                                    test_size=args['test_size'],\n",
    "                                                                    random_state=2, shuffle=True)\n",
    "    idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,\n",
    "                                                          stratify=y_train_val, test_size=args['val_size'],\n",
    "                                                          random_state=2, shuffle=True)\n",
    "    dist_path = os.path.join(prefix, \"Amazon_shortest_distance.pkl\")\n",
    "\n",
    "# Load the precomputed shortest-path distance matrix for the chosen dataset.\n",
    "with open(dist_path, 'rb') as f:\n",
    "    dist_data = pickle.load(f)\n",
    "    dist_matrix = torch.tensor(dist_data['dist_matrix']).to(device)\n",
    "\n",
    "# Prepare the feature data\n",
    "feat_data = torch.tensor(feat_data).float()\n",
    "# Feature normalization (min-max scaling)\n",
    "scaler = MinMaxScaler()\n",
    "feat_data = torch.tensor(scaler.fit_transform(feat_data)).float().to(device)\n",
    "\n",
    "# Initialize the BSNE transformer model\n",
    "bsne_model = BSNE_Transformer(\n",
    "    in_feat=feat_data.shape[1],\n",
    "    out_feat=2,\n",
    "    relation_nums=len(edge_indexs),\n",
    "    d_model=64,\n",
    "    nhead=args['num_heads'],\n",
    "    num_layers=3,\n",
    "    dim_feedforward=256,\n",
    "    drop_rate=args['drop_rate']\n",
    ").to(device)\n",
    "\n",
    "# Move the edge indices onto the target device\n",
    "for edge_index in edge_indexs:\n",
    "    edge_index[0] = edge_index[0].to(device)\n",
    "    edge_index[1] = [tensor.to(device) for tensor in edge_index[1]]\n",
    "\n",
    "# Loss trackers. NOTE(review): none of these are appended to in the\n",
    "# pretraining loop below — confirm whether they are used in later cells.\n",
    "all_local_losses = []      # per-epoch lists of batch local losses\n",
    "all_global_losses = []     # per-epoch lists of batch global losses\n",
    "all_bsne_losses = []       # total BSNE loss per epoch\n",
    "epoch_avg_local = []       # average local loss per epoch\n",
    "epoch_avg_global = []      # average global loss per epoch\n",
    "    \n",
    "    \n",
    "print(\"\\n=== Starting Pretraining ===\")\n",
    "\n",
    "# Freeze the classifier head: pretraining optimizes only the embedding layers\n",
    "# (the filter below keeps just the parameters that still require gradients).\n",
    "bsne_model.classifier.requires_grad_(False)\n",
    "optimizer = torch.optim.AdamW(\n",
    "    filter(lambda p: p.requires_grad, bsne_model.parameters()),\n",
    "    lr=args['pretrain_lr'],\n",
    "    weight_decay=5e-5  # same value as args['weight_decay'] (0.00005)\n",
    ")\n",
    "# Early-stopping bookkeeping. NOTE(review): these are never updated by the\n",
    "# pretraining loop below — confirm whether they are still needed.\n",
    "pretrain_best_loss = float('inf')\n",
    "pretrain_no_improve = 0\n",
    "pretrain_early_stop = False\n",
    "\n",
    "\n",
    "# Pretraining schedule parameters\n",
    "loss_threshold = 0.1            # stop refining a center node once its loss drops below this\n",
    "sample_size = 50                # NOTE(review): shadows args['sample_size'] — keep in sync\n",
    "max_steps_per_center = 300      # guard against an infinite refinement loop\n",
    "max_epochs = 100                # number of center nodes to pretrain on\n",
    "\n",
    "# Define the candidate center nodes here so the cell survives\n",
    "# Restart & Run All (the previous version relied on center_indices\n",
    "# leaking from an earlier cell; its commented-out definition showed the intent).\n",
    "center_indices = list(range(feat_data.shape[0]))\n",
    "# Shuffle so a different random subset of centers is trained each run\n",
    "rd.shuffle(center_indices)\n",
    "\n",
    "# Limit the number of center nodes used for training\n",
    "center_indices = center_indices[:max_epochs]\n",
    "\n",
    "\n",
    "for epoch, center_idx in enumerate(center_indices):\n",
    "    print(f\"\\n=== Pretraining Epoch {epoch} (Center Node: {center_idx}) ===\")\n",
    "    step = 0\n",
    "    # Keep refining this center node until its loss drops below loss_threshold\n",
    "    # or max_steps_per_center is reached (stop condition at the bottom).\n",
    "    while True:\n",
    "        bsne_model.train()\n",
    "        optimizer.zero_grad()\n",
    "        \n",
    "        total_loss = 0.0  # reset at every step\n",
    "        eps = 1e-10\n",
    "        \n",
    "        # Build the Bp subgraph: neighbors sampled without replacement with\n",
    "        # probability proportional to 10^(-shortest_path_distance), biased toward close nodes.\n",
    "        dist_row = dist_matrix[center_idx].cpu().numpy()\n",
    "        probs = np.power(10.0, -dist_row)\n",
    "        probs[center_idx] = 0\n",
    "        probs /= probs.sum()\n",
    "        available_nodes = len(dist_row) - 1\n",
    "        actual_sample_size = min(sample_size, available_nodes)\n",
    "        if actual_sample_size > 0:\n",
    "            neighbors = np.random.choice(len(dist_row), size=actual_sample_size, p=probs, replace=False)\n",
    "            bp_nodes = [center_idx] + neighbors.tolist()\n",
    "        else:\n",
    "            bp_nodes = [center_idx]\n",
    "        \n",
    "        # Build the Bu subgraph: neighbors sampled uniformly at random.\n",
    "        # NOTE(review): unlike the Bp sample above, the center node is not excluded\n",
    "        # here, so it can be drawn again — confirm this is intended.\n",
    "        if actual_sample_size > 0:\n",
    "            neighbors = np.random.choice(len(dist_row), size=actual_sample_size, replace=False)\n",
    "            bu_nodes = [center_idx] + neighbors.tolist()\n",
    "        else:\n",
    "            bu_nodes = [center_idx]\n",
    "        \n",
    "        # Compute embeddings for every node in the Bp subgraph\n",
    "        bp_node_features = []\n",
    "        for node_idx in bp_nodes:\n",
    "            node_subgraph = create_node_subgraph(node_idx, feat_data, edge_indexs, device)\n",
    "            _, node_feature = bsne_model([node_subgraph])\n",
    "            bp_node_features.append(node_feature.squeeze(0))\n",
    "        bp_features = torch.stack(bp_node_features)\n",
    "        \n",
    "        # Compute embeddings for every node in the Bu subgraph\n",
    "        bu_node_features = []\n",
    "        for node_idx in bu_nodes:\n",
    "            node_subgraph = create_node_subgraph(node_idx, feat_data, edge_indexs, device)\n",
    "            _, node_feature = bsne_model([node_subgraph])\n",
    "            bu_node_features.append(node_feature.squeeze(0))\n",
    "        bu_features = torch.stack(bu_node_features)\n",
    "        \n",
    "        # Compute the loss\n",
    "        center_feature = bp_features[0]  # the center node is first\n",
    "\n",
    "        # Only the other nodes of the Bp subgraph (center node excluded)\n",
    "        other_bp_indices = bp_nodes[1:]\n",
    "        other_bp_features = bp_features[1:]\n",
    "        \n",
    "        with torch.no_grad():\n",
    "            # Original (shortest-path) distances from the center to the other Bp nodes\n",
    "            orig_dists_bp = dist_matrix[center_idx][other_bp_indices].cpu().numpy()\n",
    "\n",
    "            # P vector: target distribution based on path distances\n",
    "            P = np.exp(-orig_dists_bp)\n",
    "            P_sum = P.sum()\n",
    "            P = P / (P_sum + eps)\n",
    "            P = torch.tensor(P, device=device)\n",
    "\n",
    "        # Q vector: model distribution based on embedding distances\n",
    "        # (distance from the center embedding to each other Bp embedding)\n",
    "        feat_dists_bp = torch.cdist(center_feature.unsqueeze(0), other_bp_features).squeeze(0)\n",
    "        # NOTE(review): feat_dists_bp is a non-leaf tensor that already tracks\n",
    "        # gradients through the model, so this call is likely a no-op — confirm.\n",
    "        feat_dists_bp.requires_grad_(True)\n",
    "\n",
    "        \n",
    "        Q = torch.softmax(-feat_dists_bp, dim=0)\n",
    "        \n",
    "        # Previous (non-squared) variant kept for reference:\n",
    "#         log_ratio = torch.log(P + eps) - torch.log(Q + eps)\n",
    "        log_ratio = torch.log(((P + eps)/(Q + eps))**2)\n",
    "        loss_local = log_ratio.mean() \n",
    "\n",
    "        # Global term: all-node distances turned into unnormalized probabilities\n",
    "        all_dists = dist_matrix[center_idx].cpu().numpy()\n",
    "        unnorm_probs_global = np.exp(-all_dists)\n",
    "        unnorm_probs_global[center_idx] = 0  # exclude the center itself\n",
    "\n",
    "        # Global normalization constant\n",
    "        Z_global = unnorm_probs_global.sum() + eps\n",
    "\n",
    "        # Total global probability mass of the B_p nodes\n",
    "        bp_global_prob_sum = unnorm_probs_global[other_bp_indices].sum() / Z_global\n",
    "\n",
    "        # k_Bp: Bp probability mass rescaled by the sampling ratio N / |Bp|.\n",
    "        # NOTE(review): len(other_bp_indices) is 0 when no neighbors were sampled,\n",
    "        # which would raise ZeroDivisionError here — confirm that case cannot occur.\n",
    "        N = dist_matrix.shape[0]\n",
    "        k_Bp = bp_global_prob_sum * (N / len(other_bp_indices))\n",
    "\n",
    "        bu_features_ = bu_features[1:]  # drop the center node\n",
    "        feat_dists_bu = torch.cdist(center_feature.unsqueeze(0), bu_features_).squeeze(0)\n",
    "\n",
    "        sum_e_bu = torch.exp(-feat_dists_bu).sum()\n",
    "        sum_e_bp = torch.exp(-feat_dists_bp).sum()\n",
    "        global_ratio = k_Bp * (sum_e_bu / (sum_e_bp + eps))\n",
    "        \n",
    "        # Clamp to avoid log(0)/overflow, then square (squared-term variant)\n",
    "        loss_global = torch.log(global_ratio.clamp(min=eps, max=1e10)**2)\n",
    "\n",
    "        # = 5. Combine losses =\n",
    "        total_loss += loss_local + loss_global\n",
    "        \n",
    "        total_loss.backward()\n",
    "        optimizer.step()\n",
    "        \n",
    "        print(f\"Step {step}: BSNE_Loss={total_loss.item():.4f},local_loss: {loss_local.item()},global_loss: {loss_global.item()}\")\n",
    "        step += 1\n",
    "        \n",
    "        if total_loss.item() < loss_threshold or step >= max_steps_per_center:\n",
    "            print(f\"Center node {center_idx} finished at step {step} with loss {total_loss.item():.4f}\")\n",
    "            break\n",
    "\n",
    "print(\"\\n=== Starting Fine-tuning ===\")\n",
    "# Unfreeze the classifier head for supervised fine-tuning.\n",
    "bsne_model.classifier.requires_grad_(True)\n",
    "optimizer = torch.optim.AdamW(\n",
    "    bsne_model.parameters(),\n",
    "    lr=args['finetune_lr'],\n",
    "    weight_decay=5e-5\n",
    ")\n",
    "# mode='max' because the scheduler steps on validation AUC (higher is better).\n",
    "scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(\n",
    "    optimizer, mode='max', factor=0.5, patience=10, verbose=True\n",
    ")\n",
    "\n",
    "best_val_auc = 0.0\n",
    "best_model_state = None\n",
    "train_pos, train_neg = pos_neg_split(idx_train, y_train)\n",
    "\n",
    "no_improve_epochs = 0\n",
    "early_stop = False\n",
    "\n",
    "for epoch in range(args['num_epochs']):\n",
    "    if early_stop:\n",
    "        break\n",
    "\n",
    "    bsne_model.train()\n",
    "    total_loss = 0.0\n",
    "\n",
    "    # Sample a batch of center nodes (from both classes) for this epoch.\n",
    "    batch_centers = rd.sample(train_pos + train_neg, args['batch_size'])\n",
    "    subgraph_data = [\n",
    "        create_node_subgraph(xi, feat_data, edge_indexs, device)\n",
    "        for xi in batch_centers\n",
    "    ]\n",
    "\n",
    "    optimizer.zero_grad()\n",
    "\n",
    "    center_logits, _ = bsne_model(subgraph_data)  # [B, 2]\n",
    "\n",
    "    # Labels of the sampled center nodes.\n",
    "    labels_center = torch.tensor([labels[xi] for xi in batch_centers]).to(device).long()\n",
    "\n",
    "    # Classification loss; nll_loss assumes the model outputs log-probabilities.\n",
    "    cls_loss = F.nll_loss(center_logits, labels_center)\n",
    "\n",
    "    cls_loss.backward()\n",
    "    optimizer.step()\n",
    "\n",
    "    total_loss += cls_loss.item()\n",
    "\n",
    "    # NOTE(review): cls_loss is already a mean over the batch; dividing by\n",
    "    # batch_size again rescales the logged value. Kept as-is to preserve\n",
    "    # continuity with previously logged runs — confirm intent.\n",
    "    avg_loss = total_loss / args['batch_size']\n",
    "    writer.add_scalar('FineTune/Train_Loss', avg_loss, epoch)\n",
    "\n",
    "    # Validate every 5 epochs; track the best checkpoint by validation AUC.\n",
    "    if epoch % 5 == 0:\n",
    "        val_auc, val_ap, val_f1, val_g_mean = test(idx_val, y_val, bsne_model, feat_data, edge_indexs, device)\n",
    "\n",
    "        writer.add_scalar('Validation/AUC', val_auc, epoch)\n",
    "        writer.add_scalar('Validation/F1', val_f1, epoch)\n",
    "        writer.add_scalar('Validation/GMean', val_g_mean, epoch)\n",
    "\n",
    "        print(f'Epoch: {epoch:03d} | Loss: {avg_loss:.4f} | Val AUC: {val_auc:.4f} | Val F1: {val_f1:.4f}')\n",
    "\n",
    "        scheduler.step(val_auc)\n",
    "\n",
    "        if val_auc > best_val_auc:\n",
    "            best_val_auc = val_auc\n",
    "            no_improve_epochs = 0\n",
    "            best_model_state = copy.deepcopy(bsne_model.state_dict())\n",
    "        else:\n",
    "            no_improve_epochs += 1\n",
    "\n",
    "        if no_improve_epochs >= args['patience']:\n",
    "            print(f\"Early stopping at epoch {epoch}\")\n",
    "            early_stop = True\n",
    "\n",
    "# Restore the best checkpoint. Guard against best_model_state still being\n",
    "# None (validation never ran, or AUC never improved over 0.0) — calling\n",
    "# load_state_dict(None) would raise and lose the trained weights.\n",
    "if best_model_state is not None:\n",
    "    bsne_model.load_state_dict(best_model_state)\n",
    "\n",
    "# Final evaluation on the held-out test split.\n",
    "test_auc, test_ap, test_f1, test_g_mean = test(idx_test, y_test, bsne_model, feat_data, edge_indexs, device)\n",
    "print(f'\\n=== Final Test Results ===')\n",
    "print(f'Test AUC: {test_auc:.4f} | Test AP: {test_ap:.4f} | Test F1: {test_f1:.4f} | G-mean: {test_g_mean:.4f}')\n",
    "writer.close()\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "92bf8d2a",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "54a84976",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "cuda\n",
      "loading data...\n",
      "\n",
      "=== Starting Pretraining ===\n",
      "\n",
      "=== Pretraining Epoch 0 (Center Node: 0) ===\n",
      "Step 0: BSNE_Loss=1.0094,local_loss: 0.22076474530221543,global_loss: 0.7886244654655457\n",
      "Step 1: BSNE_Loss=0.9269,local_loss: 0.3400540764987347,global_loss: 0.5868368148803711\n",
      "Step 5: BSNE_Loss=0.7145,local_loss: 0.3242723812011871,global_loss: 0.3902641534805298\n",
      "Step 6: BSNE_Loss=0.9563,local_loss: 0.4351895571441938,global_loss: 0.5211324095726013\n",
      "Step 7: BSNE_Loss=0.6163,local_loss: 0.2639216354645507,global_loss: 0.352428138256073\n",
      "Step 8: BSNE_Loss=0.9263,local_loss: 0.5488752750200009,global_loss: 0.3774597644805908\n",
      "Step 9: BSNE_Loss=1.1453,local_loss: 0.40329397309108544,global_loss: 0.7420495748519897\n",
      "Step 10: BSNE_Loss=0.6129,local_loss: 0.426972635045886,global_loss: 0.18589891493320465\n",
      "Step 11: BSNE_Loss=0.5800,local_loss: 0.07811889613009193,global_loss: 0.5018847584724426\n",
      "Step 12: BSNE_Loss=0.7334,local_loss: 0.47441538950016254,global_loss: 0.25896742939949036\n",
      "Step 13: BSNE_Loss=1.0268,local_loss: 0.4900883664074108,global_loss: 0.5366708636283875\n",
      "Step 14: BSNE_Loss=0.9089,local_loss: 0.4520172517925922,global_loss: 0.4568616449832916\n",
      "Step 15: BSNE_Loss=0.7330,local_loss: 0.35688206515553655,global_loss: 0.37611672282218933\n",
      "Step 16: BSNE_Loss=0.8320,local_loss: 0.18074280264752104,global_loss: 0.6512154340744019\n",
      "Step 17: BSNE_Loss=0.8527,local_loss: 0.2604674014455495,global_loss: 0.5922326445579529\n",
      "Step 18: BSNE_Loss=0.6529,local_loss: 0.3760506932582813,global_loss: 0.2768773138523102\n",
      "Step 19: BSNE_Loss=1.0469,local_loss: 0.2607698078717881,global_loss: 0.7861332297325134\n",
      "Step 20: BSNE_Loss=0.6573,local_loss: 0.3654402725964534,global_loss: 0.2918565571308136\n",
      "Step 21: BSNE_Loss=1.2093,local_loss: 0.07329738129533633,global_loss: 1.1360104084014893\n",
      "Step 22: BSNE_Loss=0.9354,local_loss: 0.2615454939846862,global_loss: 0.6738379597663879\n",
      "Step 23: BSNE_Loss=0.7131,local_loss: 0.3138972742112183,global_loss: 0.3992459774017334\n",
      "Step 24: BSNE_Loss=1.0151,local_loss: 0.30995752119031617,global_loss: 0.7051088213920593\n",
      "Step 25: BSNE_Loss=0.6898,local_loss: 0.36663083891349457,global_loss: 0.3231373429298401\n",
      "Step 26: BSNE_Loss=0.6315,local_loss: 0.3112981838314801,global_loss: 0.3202391564846039\n",
      "Step 27: BSNE_Loss=0.4159,local_loss: 0.3414310734150333,global_loss: 0.07443440705537796\n",
      "Step 28: BSNE_Loss=0.3761,local_loss: 0.4370363056827628,global_loss: -0.06089919060468674\n",
      "Step 29: BSNE_Loss=0.9078,local_loss: 0.3130684584332888,global_loss: 0.5946879386901855\n",
      "Step 30: BSNE_Loss=0.2900,local_loss: 0.40750401619891874,global_loss: -0.1175035834312439\n",
      "Step 31: BSNE_Loss=1.1277,local_loss: 0.31445874048851563,global_loss: 0.8132718205451965\n",
      "Step 32: BSNE_Loss=1.1328,local_loss: 0.35769698495251206,global_loss: 0.77508544921875\n",
      "Step 33: BSNE_Loss=1.1275,local_loss: 0.28474479262640356,global_loss: 0.8427222967147827\n",
      "Step 34: BSNE_Loss=0.7598,local_loss: 0.3216825939997116,global_loss: 0.43814945220947266\n",
      "Step 35: BSNE_Loss=0.7961,local_loss: 0.39809919283132467,global_loss: 0.39799582958221436\n",
      "Step 36: BSNE_Loss=0.6279,local_loss: 0.4581119610563325,global_loss: 0.16980992257595062\n",
      "Step 37: BSNE_Loss=0.7555,local_loss: 0.42450568918278364,global_loss: 0.330949604511261\n",
      "Step 38: BSNE_Loss=0.8874,local_loss: 0.4214734803635263,global_loss: 0.46590113639831543\n",
      "Step 39: BSNE_Loss=0.5724,local_loss: 0.3224958133685385,global_loss: 0.2499324232339859\n",
      "Step 40: BSNE_Loss=0.6892,local_loss: 0.14680489458949963,global_loss: 0.5424126386642456\n",
      "Step 41: BSNE_Loss=0.6159,local_loss: 0.33308343390766243,global_loss: 0.28277361392974854\n",
      "Step 42: BSNE_Loss=1.4736,local_loss: 0.31794137498874514,global_loss: 1.1556719541549683\n",
      "Step 43: BSNE_Loss=0.3616,local_loss: 0.6023150997969055,global_loss: -0.24071194231510162\n",
      "Step 44: BSNE_Loss=1.1079,local_loss: 0.31738828513709894,global_loss: 0.7905066609382629\n",
      "Step 45: BSNE_Loss=0.7239,local_loss: 0.31187728705317,global_loss: 0.41200992465019226\n",
      "Step 46: BSNE_Loss=0.5600,local_loss: 0.366086341145085,global_loss: 0.19388797879219055\n",
      "Step 47: BSNE_Loss=0.5100,local_loss: 0.23051157216300597,global_loss: 0.2795325219631195\n",
      "Step 48: BSNE_Loss=0.9714,local_loss: 0.42823737743850326,global_loss: 0.5431321859359741\n",
      "Step 49: BSNE_Loss=0.9541,local_loss: 0.2259462186698086,global_loss: 0.7281988859176636\n",
      "Step 50: BSNE_Loss=0.7820,local_loss: 0.20030295296496775,global_loss: 0.5817428231239319\n",
      "Step 51: BSNE_Loss=0.5024,local_loss: 0.19352083873943562,global_loss: 0.3088729679584503\n",
      "Step 52: BSNE_Loss=1.1157,local_loss: 0.41182376626619466,global_loss: 0.7038894891738892\n",
      "Step 53: BSNE_Loss=0.8942,local_loss: 0.27030252036766406,global_loss: 0.6239383816719055\n",
      "Step 54: BSNE_Loss=0.4323,local_loss: 0.4593953946113699,global_loss: -0.027106421068310738\n",
      "Step 55: BSNE_Loss=0.6515,local_loss: 0.3076966569616304,global_loss: 0.343771368265152\n",
      "Step 56: BSNE_Loss=0.6045,local_loss: 0.5153609667817003,global_loss: 0.08913673460483551\n",
      "Step 57: BSNE_Loss=0.4234,local_loss: 0.31086350191618517,global_loss: 0.11258526891469955\n",
      "Step 58: BSNE_Loss=0.7008,local_loss: 0.5106864895974877,global_loss: 0.1901012510061264\n",
      "Step 59: BSNE_Loss=0.5529,local_loss: 0.3059051243790286,global_loss: 0.24703019857406616\n",
      "Step 60: BSNE_Loss=0.8497,local_loss: 0.2592660150117321,global_loss: 0.5904486179351807\n",
      "Step 61: BSNE_Loss=0.9381,local_loss: 0.3112138879241129,global_loss: 0.6268795728683472\n",
      "Step 62: BSNE_Loss=0.3963,local_loss: 0.24333781123759185,global_loss: 0.15292078256607056\n",
      "Step 63: BSNE_Loss=-0.0228,local_loss: 0.5467459890455264,global_loss: -0.5695337653160095\n",
      "Center node 0 finished at step 64 with loss -0.0228\n",
      "\n",
      "=== Pretraining Epoch 1 (Center Node: 1) ===\n",
      "Step 0: BSNE_Loss=0.6960,local_loss: 0.12116135621253704,global_loss: 0.5748679637908936\n",
      "Step 1: BSNE_Loss=0.6974,local_loss: 0.24872879680036528,global_loss: 0.44864508509635925\n",
      "Step 2: BSNE_Loss=2.1506,local_loss: 0.22495949692794945,global_loss: 1.9256640672683716\n",
      "Step 3: BSNE_Loss=1.3810,local_loss: 0.09685755445283231,global_loss: 1.2841402292251587\n",
      "Step 4: BSNE_Loss=1.0900,local_loss: 0.1848411363819755,global_loss: 0.905110239982605\n",
      "Step 5: BSNE_Loss=0.6519,local_loss: 0.06928004305075323,global_loss: 0.5826623439788818\n",
      "Step 6: BSNE_Loss=1.1771,local_loss: 0.07890991897406748,global_loss: 1.0981546640396118\n",
      "Step 7: BSNE_Loss=0.9870,local_loss: 0.1810653003775603,global_loss: 0.8059277534484863\n",
      "Step 8: BSNE_Loss=1.0419,local_loss: 0.11067921929230287,global_loss: 0.9312335252761841\n",
      "Step 9: BSNE_Loss=1.3447,local_loss: 0.2928628095778458,global_loss: 1.0518298149108887\n",
      "Step 10: BSNE_Loss=1.2184,local_loss: 0.22077051473390255,global_loss: 0.9975852370262146\n",
      "Step 11: BSNE_Loss=1.4031,local_loss: 0.3446959747841739,global_loss: 1.0583890676498413\n",
      "Step 12: BSNE_Loss=0.8926,local_loss: 0.6044401264383297,global_loss: 0.28814879059791565\n",
      "Step 13: BSNE_Loss=1.4018,local_loss: 0.1896616703377373,global_loss: 1.2121126651763916\n",
      "Step 14: BSNE_Loss=0.9195,local_loss: 0.06973043731588428,global_loss: 0.8497421741485596\n",
      "Step 15: BSNE_Loss=1.0591,local_loss: 0.16814527978969032,global_loss: 0.8909280300140381\n",
      "Step 16: BSNE_Loss=1.3190,local_loss: 0.16341224140249586,global_loss: 1.1556291580200195\n",
      "Step 17: BSNE_Loss=0.5099,local_loss: 0.1911667682460582,global_loss: 0.3187676966190338\n",
      "Step 18: BSNE_Loss=0.8519,local_loss: 0.33696484081807626,global_loss: 0.5149757862091064\n",
      "Step 19: BSNE_Loss=1.0719,local_loss: 0.1475923417650013,global_loss: 0.9243482351303101\n",
      "Step 20: BSNE_Loss=0.9537,local_loss: 0.22206430434096958,global_loss: 0.7316016554832458\n",
      "Step 21: BSNE_Loss=1.0016,local_loss: 0.4549512694824789,global_loss: 0.5466451644897461\n",
      "Step 22: BSNE_Loss=1.4657,local_loss: 0.1980356242431583,global_loss: 1.267655849456787\n",
      "Step 23: BSNE_Loss=0.9869,local_loss: 0.24817624519022105,global_loss: 0.7386972904205322\n",
      "Step 24: BSNE_Loss=0.9555,local_loss: 0.2786996435454321,global_loss: 0.6768019199371338\n",
      "Step 25: BSNE_Loss=1.3800,local_loss: 0.20219457809518615,global_loss: 1.1778324842453003\n",
      "Step 26: BSNE_Loss=0.9999,local_loss: 0.36557300923629166,global_loss: 0.6343345046043396\n",
      "Step 27: BSNE_Loss=1.2261,local_loss: 0.18495256813813415,global_loss: 1.0411046743392944\n",
      "Step 28: BSNE_Loss=1.4638,local_loss: 0.13900702594646408,global_loss: 1.324834942817688\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 29: BSNE_Loss=0.8745,local_loss: 0.2813310764052836,global_loss: 0.5931980609893799\n",
      "Step 30: BSNE_Loss=0.8530,local_loss: 0.3414647206066283,global_loss: 0.5115759968757629\n",
      "Step 31: BSNE_Loss=0.9144,local_loss: 0.40186948783517723,global_loss: 0.5125324130058289\n",
      "Step 32: BSNE_Loss=0.8037,local_loss: 0.22440791357214163,global_loss: 0.579304039478302\n",
      "Step 33: BSNE_Loss=1.1293,local_loss: 0.14790483111584415,global_loss: 0.98140549659729\n",
      "Step 34: BSNE_Loss=1.1382,local_loss: 0.4567657210968572,global_loss: 0.6814782619476318\n",
      "Step 35: BSNE_Loss=1.2494,local_loss: 0.0868949028107519,global_loss: 1.162529468536377\n",
      "Step 36: BSNE_Loss=1.2429,local_loss: 0.3001750039547199,global_loss: 0.9426780343055725\n",
      "Step 37: BSNE_Loss=1.1630,local_loss: 0.09845010161702236,global_loss: 1.0645002126693726\n",
      "Step 38: BSNE_Loss=1.1492,local_loss: 0.16577359864520574,global_loss: 0.983475923538208\n",
      "Step 39: BSNE_Loss=0.8441,local_loss: 0.12953343610765808,global_loss: 0.7145435810089111\n",
      "Step 40: BSNE_Loss=0.7717,local_loss: 0.1849046818866959,global_loss: 0.5867635607719421\n",
      "Step 41: BSNE_Loss=1.2593,local_loss: 0.16088629994025958,global_loss: 1.09839928150177\n",
      "Step 42: BSNE_Loss=1.1733,local_loss: 0.1643095418699188,global_loss: 1.0090141296386719\n",
      "Step 43: BSNE_Loss=0.8487,local_loss: 0.1993320125667941,global_loss: 0.6493579149246216\n",
      "Step 44: BSNE_Loss=1.3419,local_loss: 0.1371657114447525,global_loss: 1.2047356367111206\n",
      "Step 45: BSNE_Loss=1.7845,local_loss: 0.19021384335335179,global_loss: 1.5942527055740356\n",
      "Step 46: BSNE_Loss=1.2369,local_loss: 0.09436869057426847,global_loss: 1.1425808668136597\n",
      "Step 47: BSNE_Loss=0.6941,local_loss: 0.24789426529335934,global_loss: 0.44624996185302734\n",
      "Step 48: BSNE_Loss=1.3754,local_loss: 0.2717580618374081,global_loss: 1.1036350727081299\n",
      "Step 49: BSNE_Loss=1.8440,local_loss: 0.3668548775101778,global_loss: 1.4771133661270142\n",
      "Step 50: BSNE_Loss=1.1550,local_loss: 0.1458799426863589,global_loss: 1.0091147422790527\n",
      "Step 51: BSNE_Loss=1.0663,local_loss: 0.18641060030904033,global_loss: 0.8798801898956299\n",
      "Step 52: BSNE_Loss=0.8300,local_loss: 0.17108432460572318,global_loss: 0.6589345335960388\n",
      "Step 53: BSNE_Loss=0.7663,local_loss: 0.18908728614071035,global_loss: 0.577213704586029\n",
      "Step 54: BSNE_Loss=0.8970,local_loss: 0.1351834109178733,global_loss: 0.761857807636261\n",
      "Step 55: BSNE_Loss=0.9575,local_loss: 0.4237838837845926,global_loss: 0.5336896181106567\n",
      "Step 56: BSNE_Loss=1.2493,local_loss: 0.2863663858471987,global_loss: 0.9629242420196533\n",
      "Step 57: BSNE_Loss=0.6917,local_loss: 0.32370482330064604,global_loss: 0.36801812052726746\n",
      "Step 58: BSNE_Loss=0.8625,local_loss: 0.12208333844122214,global_loss: 0.7404103875160217\n",
      "Step 59: BSNE_Loss=0.6158,local_loss: 0.1699093551170485,global_loss: 0.44591623544692993\n",
      "Step 60: BSNE_Loss=1.0327,local_loss: 0.06650728847780171,global_loss: 0.9661884307861328\n",
      "Step 61: BSNE_Loss=0.8611,local_loss: 0.08378884304106862,global_loss: 0.7773047089576721\n",
      "Step 62: BSNE_Loss=0.6844,local_loss: 0.35899698838639393,global_loss: 0.3254411816596985\n",
      "Step 63: BSNE_Loss=0.4209,local_loss: 0.1468542726929322,global_loss: 0.2740190029144287\n",
      "Step 64: BSNE_Loss=1.0238,local_loss: 0.12752502407050012,global_loss: 0.896294355392456\n",
      "Step 65: BSNE_Loss=1.2586,local_loss: 0.20963782119315114,global_loss: 1.0489931106567383\n",
      "Step 66: BSNE_Loss=0.9978,local_loss: 0.2913418482799425,global_loss: 0.7064197063446045\n",
      "Step 67: BSNE_Loss=1.1167,local_loss: 0.3298038995031361,global_loss: 0.7868846654891968\n",
      "Step 68: BSNE_Loss=0.7227,local_loss: 0.14185411820149296,global_loss: 0.5808552503585815\n",
      "Step 69: BSNE_Loss=0.6008,local_loss: 0.23217521829289858,global_loss: 0.36862045526504517\n",
      "Step 70: BSNE_Loss=0.7631,local_loss: 0.27794052532782626,global_loss: 0.48511821031570435\n",
      "Step 71: BSNE_Loss=1.2805,local_loss: 0.22705904732397475,global_loss: 1.0534069538116455\n",
      "Step 72: BSNE_Loss=1.0704,local_loss: 0.32289228524111,global_loss: 0.7474939227104187\n",
      "Step 73: BSNE_Loss=0.7969,local_loss: 0.3148577567453859,global_loss: 0.4820115566253662\n",
      "Step 74: BSNE_Loss=1.1030,local_loss: 0.1352917945374052,global_loss: 0.9676834940910339\n",
      "Step 75: BSNE_Loss=0.8872,local_loss: 0.11745377497502638,global_loss: 0.7697476148605347\n",
      "Step 76: BSNE_Loss=1.2908,local_loss: 0.08271529818875062,global_loss: 1.2080788612365723\n",
      "Step 77: BSNE_Loss=0.5038,local_loss: 0.18377404306646938,global_loss: 0.32006266713142395\n",
      "Step 78: BSNE_Loss=0.7709,local_loss: 0.16555387509668976,global_loss: 0.6052994132041931\n",
      "Step 79: BSNE_Loss=0.7260,local_loss: 0.12169777543754155,global_loss: 0.6043326258659363\n",
      "Step 80: BSNE_Loss=0.5046,local_loss: 0.034909949885925134,global_loss: 0.4697350263595581\n",
      "Step 81: BSNE_Loss=0.9013,local_loss: 0.281113436798314,global_loss: 0.6201758980751038\n",
      "Step 82: BSNE_Loss=0.9235,local_loss: 0.2242267278771959,global_loss: 0.6992486119270325\n",
      "Step 83: BSNE_Loss=0.5357,local_loss: 0.19070968921035725,global_loss: 0.34501248598098755\n",
      "Step 84: BSNE_Loss=0.8272,local_loss: 0.08168304253619027,global_loss: 0.7454989552497864\n",
      "Step 85: BSNE_Loss=1.0606,local_loss: 0.12254124433780832,global_loss: 0.9380974173545837\n",
      "Step 86: BSNE_Loss=1.3718,local_loss: 0.17247483433125335,global_loss: 1.199342131614685\n",
      "Step 87: BSNE_Loss=0.2790,local_loss: 0.31900441768638566,global_loss: -0.04000520706176758\n",
      "Step 88: BSNE_Loss=0.8535,local_loss: 0.1615371488193078,global_loss: 0.6919766664505005\n",
      "Step 89: BSNE_Loss=0.5750,local_loss: 0.11197836222613554,global_loss: 0.46304386854171753\n",
      "Step 90: BSNE_Loss=0.6651,local_loss: 0.11438748727961114,global_loss: 0.5507420301437378\n",
      "Step 91: BSNE_Loss=0.9557,local_loss: 0.023917123819875395,global_loss: 0.9318199753761292\n",
      "Step 92: BSNE_Loss=1.0197,local_loss: 0.35600908683921945,global_loss: 0.6637212634086609\n",
      "Step 93: BSNE_Loss=0.8646,local_loss: 0.06337071767552335,global_loss: 0.8011820912361145\n",
      "Step 94: BSNE_Loss=0.5313,local_loss: 0.1742080560355948,global_loss: 0.3571294844150543\n",
      "Step 95: BSNE_Loss=1.0122,local_loss: 0.11247175669319798,global_loss: 0.8997166156768799\n",
      "Step 96: BSNE_Loss=1.0186,local_loss: 0.21376719633678457,global_loss: 0.8048819899559021\n",
      "Step 97: BSNE_Loss=0.7347,local_loss: 0.012937126399940678,global_loss: 0.7217797636985779\n",
      "Step 98: BSNE_Loss=0.8450,local_loss: 0.14262611780926895,global_loss: 0.7024211883544922\n",
      "Step 99: BSNE_Loss=0.6527,local_loss: 0.2421584953355248,global_loss: 0.41059044003486633\n",
      "Step 100: BSNE_Loss=0.8844,local_loss: 0.07956496633740022,global_loss: 0.8048826456069946\n",
      "Step 101: BSNE_Loss=1.1355,local_loss: 0.14568971227792293,global_loss: 0.9897985458374023\n",
      "Step 102: BSNE_Loss=1.0711,local_loss: 0.16430456999267284,global_loss: 0.9067633748054504\n",
      "Step 103: BSNE_Loss=0.8548,local_loss: 0.13084502716338517,global_loss: 0.7239435911178589\n",
      "Step 104: BSNE_Loss=1.0636,local_loss: 0.19401173948524125,global_loss: 0.8695945739746094\n",
      "Step 105: BSNE_Loss=0.8975,local_loss: 0.18072533326306428,global_loss: 0.7167584896087646\n",
      "Step 106: BSNE_Loss=0.7285,local_loss: 0.09151973467023719,global_loss: 0.6369488835334778\n",
      "Step 107: BSNE_Loss=1.1484,local_loss: -0.04470556247264703,global_loss: 1.1931390762329102\n",
      "Step 108: BSNE_Loss=1.2529,local_loss: 0.21209966851393824,global_loss: 1.0407558679580688\n",
      "Step 109: BSNE_Loss=1.4332,local_loss: 0.181146890856317,global_loss: 1.2520170211791992\n",
      "Step 110: BSNE_Loss=0.9778,local_loss: 0.18073459193249242,global_loss: 0.7971048355102539\n",
      "Step 111: BSNE_Loss=1.2958,local_loss: 0.07345741649418581,global_loss: 1.222314476966858\n",
      "Step 112: BSNE_Loss=1.2260,local_loss: 0.18562639940176978,global_loss: 1.0403261184692383\n",
      "Step 113: BSNE_Loss=0.8613,local_loss: 0.2086967643041797,global_loss: 0.6525608897209167\n",
      "Step 114: BSNE_Loss=0.4960,local_loss: 0.1672429093309504,global_loss: 0.32875770330429077\n",
      "Step 115: BSNE_Loss=1.0360,local_loss: 0.2683759353946055,global_loss: 0.7675955295562744\n",
      "Step 116: BSNE_Loss=0.4820,local_loss: 0.3733055389123433,global_loss: 0.10869520902633667\n",
      "Step 117: BSNE_Loss=0.7745,local_loss: 0.2232482430915531,global_loss: 0.5512924194335938\n",
      "Step 118: BSNE_Loss=0.9543,local_loss: 0.07888314483442063,global_loss: 0.875422477722168\n",
      "Step 119: BSNE_Loss=0.5311,local_loss: 0.1273080349514729,global_loss: 0.4038374423980713\n",
      "Step 120: BSNE_Loss=0.6794,local_loss: 0.23931658162189798,global_loss: 0.44005465507507324\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 121: BSNE_Loss=0.8322,local_loss: 0.06358688533777826,global_loss: 0.768575131893158\n",
      "Step 122: BSNE_Loss=0.7933,local_loss: 0.1952201034714907,global_loss: 0.5980736017227173\n",
      "Step 123: BSNE_Loss=1.0500,local_loss: 0.18095876797358457,global_loss: 0.869066059589386\n",
      "Step 124: BSNE_Loss=1.3655,local_loss: 0.13205014612400098,global_loss: 1.2334259748458862\n",
      "Step 125: BSNE_Loss=0.9806,local_loss: 0.30286424215648466,global_loss: 0.6777074933052063\n",
      "Step 126: BSNE_Loss=0.8461,local_loss: 0.1343770588990148,global_loss: 0.7116734385490417\n",
      "Step 127: BSNE_Loss=0.7921,local_loss: 0.16497444800541622,global_loss: 0.6271038055419922\n",
      "Step 128: BSNE_Loss=0.6665,local_loss: 0.12278315439853948,global_loss: 0.5437180995941162\n",
      "Step 129: BSNE_Loss=0.6620,local_loss: 0.11774183167843875,global_loss: 0.54425048828125\n",
      "Step 130: BSNE_Loss=0.7149,local_loss: 0.04346161309936113,global_loss: 0.6714060306549072\n",
      "Step 131: BSNE_Loss=1.1416,local_loss: 0.13849739716638226,global_loss: 1.0030925273895264\n",
      "Step 132: BSNE_Loss=0.4671,local_loss: 0.11597148705288185,global_loss: 0.351127952337265\n",
      "Step 133: BSNE_Loss=0.9037,local_loss: 0.006035328246621887,global_loss: 0.8977092504501343\n",
      "Step 134: BSNE_Loss=0.1639,local_loss: 0.35498460580543667,global_loss: -0.19104494154453278\n",
      "Center node 1 finished at step 135 with loss 0.1639\n",
      "\n",
      "=== Pretraining Epoch 2 (Center Node: 2) ===\n",
      "Step 0: BSNE_Loss=1.3055,local_loss: 0.1989496861447544,global_loss: 1.1065208911895752\n",
      "Step 1: BSNE_Loss=1.2734,local_loss: 0.5315422891105692,global_loss: 0.7418713569641113\n",
      "Step 2: BSNE_Loss=1.5987,local_loss: 0.26744012615176965,global_loss: 1.3312203884124756\n",
      "Step 3: BSNE_Loss=1.5771,local_loss: 0.2633343663048935,global_loss: 1.3138141632080078\n",
      "Step 4: BSNE_Loss=1.5744,local_loss: 0.3632224103012059,global_loss: 1.2111624479293823\n",
      "Step 5: BSNE_Loss=1.8364,local_loss: 0.42167504186143345,global_loss: 1.414734125137329\n",
      "Step 6: BSNE_Loss=1.0478,local_loss: 0.5474726254163876,global_loss: 0.5003060102462769\n",
      "Step 7: BSNE_Loss=1.3833,local_loss: 0.496817484478984,global_loss: 0.8864606618881226\n",
      "Step 8: BSNE_Loss=1.1960,local_loss: 0.30271802710721757,global_loss: 0.8933204412460327\n",
      "Step 9: BSNE_Loss=1.3818,local_loss: 0.3953335399635222,global_loss: 0.9864599704742432\n",
      "Step 10: BSNE_Loss=1.0931,local_loss: 0.42422417249116634,global_loss: 0.6689198017120361\n",
      "Step 11: BSNE_Loss=1.7505,local_loss: 0.40205750046504884,global_loss: 1.3484076261520386\n",
      "Step 12: BSNE_Loss=1.3756,local_loss: 0.21593705795778817,global_loss: 1.1597003936767578\n",
      "Step 13: BSNE_Loss=1.0841,local_loss: 0.27251423227128635,global_loss: 0.8115809559822083\n",
      "Step 14: BSNE_Loss=1.6609,local_loss: 0.685580592374369,global_loss: 0.9753385782241821\n",
      "Step 15: BSNE_Loss=0.9818,local_loss: 0.36992547336510073,global_loss: 0.6119032502174377\n",
      "Step 16: BSNE_Loss=1.1491,local_loss: 0.44289678493941415,global_loss: 0.7062380313873291\n",
      "Step 17: BSNE_Loss=1.2516,local_loss: 0.08963387639683014,global_loss: 1.1619939804077148\n",
      "Step 18: BSNE_Loss=1.7042,local_loss: 0.40766160804057555,global_loss: 1.296570897102356\n",
      "Step 19: BSNE_Loss=1.4668,local_loss: 0.12224857413713715,global_loss: 1.3445954322814941\n",
      "Step 20: BSNE_Loss=1.1487,local_loss: 0.20793171174018366,global_loss: 0.9407252073287964\n",
      "Step 21: BSNE_Loss=0.8483,local_loss: 0.0008618721229553428,global_loss: 0.8474679589271545\n",
      "Step 22: BSNE_Loss=0.9662,local_loss: 0.34713817711542805,global_loss: 0.6190842390060425\n",
      "Step 23: BSNE_Loss=0.9529,local_loss: 0.15845314790019147,global_loss: 0.7944069504737854\n",
      "Step 24: BSNE_Loss=1.4700,local_loss: 0.10684400644807486,global_loss: 1.3631093502044678\n",
      "Step 25: BSNE_Loss=0.9329,local_loss: 0.26661637645068387,global_loss: 0.6662888526916504\n",
      "Step 26: BSNE_Loss=1.3634,local_loss: 0.232568711787388,global_loss: 1.1307848691940308\n",
      "Step 27: BSNE_Loss=0.9901,local_loss: 0.2637433489888348,global_loss: 0.726381242275238\n",
      "Step 28: BSNE_Loss=1.6539,local_loss: 0.14929665518782922,global_loss: 1.50460946559906\n",
      "Step 29: BSNE_Loss=0.6706,local_loss: 0.07458642007062101,global_loss: 0.595970094203949\n",
      "Step 30: BSNE_Loss=1.6325,local_loss: 0.22619646965838883,global_loss: 1.406258463859558\n",
      "Step 31: BSNE_Loss=1.1999,local_loss: 0.14318120331785256,global_loss: 1.0567034482955933\n",
      "Step 32: BSNE_Loss=1.1046,local_loss: 0.24256355063915286,global_loss: 0.8620715141296387\n",
      "Step 33: BSNE_Loss=1.0345,local_loss: 0.0620628013167284,global_loss: 0.9724649786949158\n",
      "Step 34: BSNE_Loss=1.5649,local_loss: 0.06669582762819962,global_loss: 1.4982514381408691\n",
      "Step 35: BSNE_Loss=1.2223,local_loss: 0.1001356708981275,global_loss: 1.1221232414245605\n",
      "Step 36: BSNE_Loss=0.9994,local_loss: 0.1374488977663009,global_loss: 0.8619304299354553\n",
      "Step 37: BSNE_Loss=1.2336,local_loss: 0.12936610721218814,global_loss: 1.1042404174804688\n",
      "Step 38: BSNE_Loss=1.0640,local_loss: 0.182746513370712,global_loss: 0.8812713027000427\n",
      "Step 39: BSNE_Loss=1.5316,local_loss: 0.07173795485709461,global_loss: 1.4599064588546753\n",
      "Step 40: BSNE_Loss=1.1321,local_loss: 0.11179963445918861,global_loss: 1.0202865600585938\n",
      "Step 41: BSNE_Loss=0.8796,local_loss: 0.2256414700565083,global_loss: 0.6539260745048523\n",
      "Step 42: BSNE_Loss=0.9897,local_loss: 0.21184375859519328,global_loss: 0.7778769731521606\n",
      "Step 43: BSNE_Loss=0.3109,local_loss: 0.08775008661091645,global_loss: 0.2231578528881073\n",
      "Step 44: BSNE_Loss=1.0252,local_loss: 0.21954260544394147,global_loss: 0.8056138753890991\n",
      "Step 45: BSNE_Loss=0.8702,local_loss: -0.009204832995782546,global_loss: 0.8793664574623108\n",
      "Step 46: BSNE_Loss=1.4181,local_loss: 0.19630659976702816,global_loss: 1.2217583656311035\n",
      "Step 47: BSNE_Loss=0.9262,local_loss: 0.14874659277772648,global_loss: 0.7774063348770142\n",
      "Step 48: BSNE_Loss=1.4544,local_loss: -0.01792648338839337,global_loss: 1.4723515510559082\n",
      "Step 49: BSNE_Loss=0.8706,local_loss: 0.09145022688146746,global_loss: 0.7791044116020203\n",
      "Step 50: BSNE_Loss=1.3848,local_loss: 0.14059768099667785,global_loss: 1.2442097663879395\n",
      "Step 51: BSNE_Loss=1.9856,local_loss: 0.06434705483855739,global_loss: 1.9212608337402344\n",
      "Step 52: BSNE_Loss=1.3951,local_loss: 0.0939590347275236,global_loss: 1.301127314567566\n",
      "Step 53: BSNE_Loss=0.9625,local_loss: 0.12974093269590328,global_loss: 0.8327763080596924\n",
      "Step 54: BSNE_Loss=1.2689,local_loss: 0.1615388216453509,global_loss: 1.1073390245437622\n",
      "Step 55: BSNE_Loss=0.5482,local_loss: 0.08499479961320328,global_loss: 0.4632033705711365\n",
      "Step 56: BSNE_Loss=0.9992,local_loss: 0.05345350733056835,global_loss: 0.9457416534423828\n",
      "Step 57: BSNE_Loss=1.1339,local_loss: 0.11319301495868046,global_loss: 1.0206987857818604\n",
      "Step 58: BSNE_Loss=1.2318,local_loss: 0.10959885570199966,global_loss: 1.1222447156906128\n",
      "Step 59: BSNE_Loss=1.4735,local_loss: 0.24198537084262348,global_loss: 1.231482744216919\n",
      "Step 60: BSNE_Loss=1.1448,local_loss: 0.19032410934925537,global_loss: 0.9545140266418457\n",
      "Step 61: BSNE_Loss=0.7723,local_loss: 0.19181189461746062,global_loss: 0.5804521441459656\n",
      "Step 62: BSNE_Loss=0.9337,local_loss: 0.1699421724414453,global_loss: 0.7637968063354492\n",
      "Step 63: BSNE_Loss=0.9847,local_loss: 0.18450981168058606,global_loss: 0.8002017736434937\n",
      "Step 64: BSNE_Loss=0.8239,local_loss: 0.2560007392516893,global_loss: 0.5678630471229553\n",
      "Step 65: BSNE_Loss=0.5946,local_loss: 0.09846511680133535,global_loss: 0.4961282014846802\n",
      "Step 66: BSNE_Loss=1.2758,local_loss: 0.1940285570987193,global_loss: 1.0817971229553223\n",
      "Step 67: BSNE_Loss=1.1958,local_loss: 0.2223966545548472,global_loss: 0.9733977317810059\n",
      "Step 68: BSNE_Loss=0.9305,local_loss: 0.021313359643672473,global_loss: 0.909180760383606\n",
      "Step 69: BSNE_Loss=1.1194,local_loss: 0.020756105106316234,global_loss: 1.0986430644989014\n",
      "Step 70: BSNE_Loss=1.4188,local_loss: 0.06279631687107852,global_loss: 1.355993390083313\n",
      "Step 71: BSNE_Loss=1.0717,local_loss: 0.26304128843378705,global_loss: 0.8086163997650146\n",
      "Step 72: BSNE_Loss=0.7486,local_loss: 0.18731627827680586,global_loss: 0.5612839460372925\n",
      "Step 73: BSNE_Loss=0.4888,local_loss: 0.26154011098384167,global_loss: 0.2273046374320984\n",
      "Step 74: BSNE_Loss=1.0995,local_loss: 0.08189212362436987,global_loss: 1.0176221132278442\n",
      "Step 75: BSNE_Loss=0.7122,local_loss: 0.14205152609638433,global_loss: 0.5701203942298889\n",
      "Step 76: BSNE_Loss=1.0441,local_loss: 0.23367829683822494,global_loss: 0.8104492425918579\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 77: BSNE_Loss=1.0604,local_loss: 0.12257212678372215,global_loss: 0.9378775358200073\n",
      "Step 78: BSNE_Loss=1.2915,local_loss: 0.23598199858407262,global_loss: 1.0555505752563477\n",
      "Step 79: BSNE_Loss=1.4219,local_loss: -0.015597697054194413,global_loss: 1.437481164932251\n",
      "Step 80: BSNE_Loss=0.8001,local_loss: 0.05426414527944251,global_loss: 0.7458693981170654\n",
      "Step 81: BSNE_Loss=0.9426,local_loss: 0.06679199990610533,global_loss: 0.8758405447006226\n",
      "Step 82: BSNE_Loss=1.0299,local_loss: 0.25982388913919147,global_loss: 0.7700740694999695\n",
      "Step 83: BSNE_Loss=0.7256,local_loss: 0.06732813872998662,global_loss: 0.6582501530647278\n",
      "Step 84: BSNE_Loss=0.9130,local_loss: 0.043314361188061805,global_loss: 0.8696367740631104\n",
      "Step 85: BSNE_Loss=0.5305,local_loss: 0.09590255944810708,global_loss: 0.43456244468688965\n",
      "Step 86: BSNE_Loss=0.9609,local_loss: 0.03283986394944357,global_loss: 0.9280809164047241\n",
      "Step 87: BSNE_Loss=1.1413,local_loss: 0.1565064590364784,global_loss: 0.9848023653030396\n",
      "Step 88: BSNE_Loss=1.1051,local_loss: 0.19058374462775632,global_loss: 0.9145309925079346\n",
      "Step 89: BSNE_Loss=1.0264,local_loss: 0.1253046424811842,global_loss: 0.9011178016662598\n",
      "Step 90: BSNE_Loss=1.1219,local_loss: 0.18802278146597792,global_loss: 0.9339118003845215\n",
      "Step 91: BSNE_Loss=1.0597,local_loss: 0.048407524575146264,global_loss: 1.0113340616226196\n",
      "Step 92: BSNE_Loss=0.6373,local_loss: 0.11759458242340991,global_loss: 0.5196641087532043\n",
      "Step 93: BSNE_Loss=1.3636,local_loss: 0.12909220608587083,global_loss: 1.2345165014266968\n",
      "Step 94: BSNE_Loss=0.5882,local_loss: 0.1543638959158185,global_loss: 0.433871865272522\n",
      "Step 95: BSNE_Loss=0.9966,local_loss: 0.14339166599103115,global_loss: 0.8532567024230957\n",
      "Step 96: BSNE_Loss=0.8471,local_loss: 0.2668444738914541,global_loss: 0.5802733898162842\n",
      "Step 97: BSNE_Loss=1.1150,local_loss: 0.09561891833271315,global_loss: 1.019399642944336\n",
      "Step 98: BSNE_Loss=0.2972,local_loss: 0.1211835489044626,global_loss: 0.17598076164722443\n",
      "Step 99: BSNE_Loss=0.7553,local_loss: 0.22801983398200598,global_loss: 0.52724289894104\n",
      "Step 100: BSNE_Loss=0.9934,local_loss: 0.15709379918954702,global_loss: 0.8362655639648438\n",
      "Step 101: BSNE_Loss=1.2445,local_loss: 0.043936633978024046,global_loss: 1.2005629539489746\n",
      "Step 102: BSNE_Loss=0.9964,local_loss: 0.35797660306590456,global_loss: 0.6384578943252563\n",
      "Step 103: BSNE_Loss=1.2407,local_loss: -0.06558034981199036,global_loss: 1.3062832355499268\n",
      "Step 104: BSNE_Loss=1.3166,local_loss: 0.08874616950618766,global_loss: 1.2278107404708862\n",
      "Step 105: BSNE_Loss=0.7335,local_loss: 0.05020799179314717,global_loss: 0.6832849979400635\n",
      "Step 106: BSNE_Loss=1.2123,local_loss: 0.23817262078731266,global_loss: 0.9741031527519226\n",
      "Step 107: BSNE_Loss=0.8377,local_loss: 0.18978477784645623,global_loss: 0.6478971838951111\n",
      "Step 108: BSNE_Loss=1.2225,local_loss: 0.10926549458491357,global_loss: 1.1132782697677612\n",
      "Step 109: BSNE_Loss=0.8234,local_loss: 0.11368283673561401,global_loss: 0.7096831798553467\n",
      "Step 110: BSNE_Loss=1.2058,local_loss: 0.05619751481091364,global_loss: 1.1495575904846191\n",
      "Step 111: BSNE_Loss=1.2717,local_loss: 0.3411090912324614,global_loss: 0.930637776851654\n",
      "Step 112: BSNE_Loss=1.1981,local_loss: 0.16992222292865491,global_loss: 1.0281648635864258\n",
      "Step 113: BSNE_Loss=0.6162,local_loss: 0.12779379037697175,global_loss: 0.48840901255607605\n",
      "Step 114: BSNE_Loss=0.4883,local_loss: 0.15839863441246682,global_loss: 0.32987919449806213\n",
      "Step 115: BSNE_Loss=0.5875,local_loss: 0.1455312917168252,global_loss: 0.44192272424697876\n",
      "Step 116: BSNE_Loss=0.3516,local_loss: 0.07920231465684632,global_loss: 0.2723749279975891\n",
      "Step 117: BSNE_Loss=0.7153,local_loss: 0.0747991732073738,global_loss: 0.6404599547386169\n",
      "Step 118: BSNE_Loss=1.3086,local_loss: 0.1795053924687365,global_loss: 1.1291218996047974\n",
      "Step 119: BSNE_Loss=0.8885,local_loss: 0.14589318277777666,global_loss: 0.7426462769508362\n",
      "Step 120: BSNE_Loss=0.6693,local_loss: 0.002205154546696808,global_loss: 0.6671314835548401\n",
      "Step 121: BSNE_Loss=1.1993,local_loss: 0.08836756111820686,global_loss: 1.110970377922058\n",
      "Step 122: BSNE_Loss=1.2943,local_loss: 0.08918422877419233,global_loss: 1.2050790786743164\n",
      "Step 123: BSNE_Loss=1.2010,local_loss: 0.06637829275970666,global_loss: 1.1345765590667725\n",
      "Step 124: BSNE_Loss=0.7711,local_loss: 0.2085042432652211,global_loss: 0.5625901818275452\n",
      "Step 125: BSNE_Loss=0.9211,local_loss: 0.08855791224312569,global_loss: 0.8325881958007812\n",
      "Step 126: BSNE_Loss=1.0956,local_loss: 0.12382764386091805,global_loss: 0.9718023538589478\n",
      "Step 127: BSNE_Loss=1.1293,local_loss: 0.10921815204414145,global_loss: 1.0200613737106323\n",
      "Step 128: BSNE_Loss=0.5972,local_loss: 0.1274912799222791,global_loss: 0.46974119544029236\n",
      "Step 129: BSNE_Loss=1.5863,local_loss: 0.28103328133371813,global_loss: 1.3052423000335693\n",
      "Step 130: BSNE_Loss=1.2647,local_loss: 0.07463865314267558,global_loss: 1.190018892288208\n",
      "Step 131: BSNE_Loss=0.7907,local_loss: -0.02467315036255857,global_loss: 0.8153616189956665\n",
      "Step 132: BSNE_Loss=0.3110,local_loss: 0.07750339932784633,global_loss: 0.23352335393428802\n",
      "Step 133: BSNE_Loss=0.9846,local_loss: 0.24145690948656728,global_loss: 0.7431214451789856\n",
      "Step 134: BSNE_Loss=0.9522,local_loss: 0.05027280908832559,global_loss: 0.9019659757614136\n",
      "Step 135: BSNE_Loss=0.9264,local_loss: 0.04043721515516308,global_loss: 0.8859779834747314\n",
      "Step 136: BSNE_Loss=0.4840,local_loss: 0.04225456662839892,global_loss: 0.4417545199394226\n",
      "Step 137: BSNE_Loss=1.1997,local_loss: 0.24104501900710545,global_loss: 0.9586061239242554\n",
      "Step 138: BSNE_Loss=0.6074,local_loss: 0.1541957174339762,global_loss: 0.45320814847946167\n",
      "Step 139: BSNE_Loss=1.1804,local_loss: 0.02936985867688806,global_loss: 1.1510396003723145\n",
      "Step 140: BSNE_Loss=0.7639,local_loss: 0.009896515247197035,global_loss: 0.7540012001991272\n",
      "Step 141: BSNE_Loss=1.1584,local_loss: 0.035283208223837355,global_loss: 1.1231071949005127\n",
      "Step 142: BSNE_Loss=1.3501,local_loss: 0.09510099886488448,global_loss: 1.255046010017395\n",
      "Step 143: BSNE_Loss=1.3558,local_loss: 0.25821685982482867,global_loss: 1.0975719690322876\n",
      "Step 144: BSNE_Loss=0.5753,local_loss: 0.08230451126154278,global_loss: 0.4930380582809448\n",
      "Step 145: BSNE_Loss=0.9236,local_loss: 0.134329778193383,global_loss: 0.7892332673072815\n",
      "Step 146: BSNE_Loss=1.2188,local_loss: 0.07827600193784277,global_loss: 1.140481948852539\n",
      "Step 147: BSNE_Loss=0.6455,local_loss: 0.06890656160676314,global_loss: 0.5766410827636719\n",
      "Step 148: BSNE_Loss=0.7370,local_loss: 0.0932256558765585,global_loss: 0.6437699198722839\n",
      "Step 149: BSNE_Loss=0.9155,local_loss: 0.3776989742634466,global_loss: 0.5377871990203857\n",
      "Step 150: BSNE_Loss=0.7551,local_loss: 0.06403670875912566,global_loss: 0.6910338401794434\n",
      "Step 151: BSNE_Loss=1.7079,local_loss: 0.052657122265282366,global_loss: 1.6552201509475708\n",
      "Step 152: BSNE_Loss=0.5820,local_loss: 0.09544692898812163,global_loss: 0.4865795969963074\n",
      "Step 153: BSNE_Loss=0.7608,local_loss: 0.022964964114529262,global_loss: 0.7378538250923157\n",
      "Step 154: BSNE_Loss=1.3540,local_loss: 0.1308874260917475,global_loss: 1.2231241464614868\n",
      "Step 155: BSNE_Loss=0.9198,local_loss: 0.2668397522137193,global_loss: 0.6529845595359802\n",
      "Step 156: BSNE_Loss=0.9913,local_loss: 0.02783209902847169,global_loss: 0.9634820222854614\n",
      "Step 157: BSNE_Loss=1.1897,local_loss: 0.052579053465758155,global_loss: 1.1371272802352905\n",
      "Step 158: BSNE_Loss=0.6420,local_loss: 0.03790860074760845,global_loss: 0.604066789150238\n",
      "Step 159: BSNE_Loss=0.9608,local_loss: 0.06620902327001116,global_loss: 0.8946132659912109\n",
      "Step 160: BSNE_Loss=0.2483,local_loss: 0.23088948486779276,global_loss: 0.017404211685061455\n",
      "Step 161: BSNE_Loss=0.5718,local_loss: 0.11493911493840361,global_loss: 0.4568989872932434\n",
      "Step 162: BSNE_Loss=1.0069,local_loss: 0.11255271082537524,global_loss: 0.894328236579895\n",
      "Step 163: BSNE_Loss=0.4856,local_loss: -0.01648021243151069,global_loss: 0.5020825862884521\n",
      "Step 164: BSNE_Loss=1.1162,local_loss: 0.005807451468835554,global_loss: 1.1104320287704468\n",
      "Step 165: BSNE_Loss=0.7617,local_loss: 0.12684922552947792,global_loss: 0.6348985433578491\n",
      "Step 166: BSNE_Loss=0.5796,local_loss: -3.402502241823591e-05,global_loss: 0.5796144008636475\n",
      "Step 167: BSNE_Loss=0.5492,local_loss: 0.1710410067501999,global_loss: 0.3781784176826477\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 168: BSNE_Loss=0.5825,local_loss: 0.06991417909439512,global_loss: 0.5126290321350098\n",
      "Step 169: BSNE_Loss=1.0390,local_loss: 0.12892883752610634,global_loss: 0.910078227519989\n",
      "Step 170: BSNE_Loss=0.8909,local_loss: 0.43862600938519536,global_loss: 0.4522862434387207\n",
      "Step 171: BSNE_Loss=0.7952,local_loss: 0.02890657571141099,global_loss: 0.7663054466247559\n",
      "Step 172: BSNE_Loss=0.7636,local_loss: 0.08460142891930399,global_loss: 0.6789619326591492\n",
      "Step 173: BSNE_Loss=1.2076,local_loss: 0.08660556010484138,global_loss: 1.1210205554962158\n",
      "Step 174: BSNE_Loss=0.6724,local_loss: 0.28244810802044756,global_loss: 0.38997578620910645\n",
      "Step 175: BSNE_Loss=0.7466,local_loss: 0.03159056452404796,global_loss: 0.7149936556816101\n",
      "Step 176: BSNE_Loss=0.9798,local_loss: 0.24922937812815993,global_loss: 0.7305957078933716\n",
      "Step 177: BSNE_Loss=1.0689,local_loss: 0.06789117963317659,global_loss: 1.0009921789169312\n",
      "Step 178: BSNE_Loss=0.6309,local_loss: 0.17398460517900158,global_loss: 0.45695653557777405\n",
      "Step 179: BSNE_Loss=0.8274,local_loss: 0.15722170836557894,global_loss: 0.6702027916908264\n",
      "Step 180: BSNE_Loss=0.5871,local_loss: 0.21189207595723203,global_loss: 0.3752121329307556\n",
      "Step 181: BSNE_Loss=0.6531,local_loss: 0.03584128980556144,global_loss: 0.6172646284103394\n",
      "Step 182: BSNE_Loss=0.4887,local_loss: 0.01752075799736949,global_loss: 0.47119301557540894\n",
      "Step 183: BSNE_Loss=0.5455,local_loss: 0.08639505693566861,global_loss: 0.4590878188610077\n",
      "Step 184: BSNE_Loss=0.8029,local_loss: 0.17024838693881453,global_loss: 0.6326060891151428\n",
      "Step 185: BSNE_Loss=1.4415,local_loss: -0.06199200488665119,global_loss: 1.503470778465271\n",
      "Step 186: BSNE_Loss=1.2831,local_loss: 0.04916358989541003,global_loss: 1.2338910102844238\n",
      "Step 187: BSNE_Loss=0.9481,local_loss: 0.17670349024890392,global_loss: 0.7713526487350464\n",
      "Step 188: BSNE_Loss=1.0416,local_loss: 0.1369752199297604,global_loss: 0.9046608209609985\n",
      "Step 189: BSNE_Loss=0.7485,local_loss: 0.1609222701394176,global_loss: 0.5876134634017944\n",
      "Step 190: BSNE_Loss=1.0126,local_loss: 0.1988392599655348,global_loss: 0.8137543201446533\n",
      "Step 191: BSNE_Loss=0.8501,local_loss: 0.21682274931701287,global_loss: 0.6332595348358154\n",
      "Step 192: BSNE_Loss=0.8456,local_loss: 0.19672055200002678,global_loss: 0.6488767266273499\n",
      "Step 193: BSNE_Loss=0.8214,local_loss: 0.11013715174816877,global_loss: 0.7113046646118164\n",
      "Step 194: BSNE_Loss=0.9517,local_loss: 0.08679917070598059,global_loss: 0.8649446368217468\n",
      "Step 195: BSNE_Loss=0.6308,local_loss: 0.15568433696034892,global_loss: 0.4750722050666809\n",
      "Step 196: BSNE_Loss=0.5927,local_loss: 0.05605599244554468,global_loss: 0.5366515517234802\n",
      "Step 197: BSNE_Loss=0.8147,local_loss: 0.1368283506973288,global_loss: 0.6778790354728699\n",
      "Step 198: BSNE_Loss=0.8243,local_loss: 0.11457312078868018,global_loss: 0.7097184658050537\n",
      "Step 199: BSNE_Loss=0.9137,local_loss: -0.017275926088258783,global_loss: 0.9310182332992554\n",
      "Step 200: BSNE_Loss=0.9012,local_loss: 0.05638976778813985,global_loss: 0.8447624444961548\n",
      "Step 201: BSNE_Loss=0.9025,local_loss: 0.32372340225344653,global_loss: 0.5787371397018433\n",
      "Step 202: BSNE_Loss=0.8552,local_loss: 0.2570862396198347,global_loss: 0.5981234312057495\n",
      "Step 203: BSNE_Loss=0.9357,local_loss: -0.049230744886768055,global_loss: 0.9849497675895691\n",
      "Step 204: BSNE_Loss=0.9123,local_loss: 0.0750291130073952,global_loss: 0.8373175859451294\n",
      "Step 205: BSNE_Loss=1.2376,local_loss: 0.10419680860915072,global_loss: 1.1334125995635986\n",
      "Step 206: BSNE_Loss=0.8996,local_loss: 0.10846465137473299,global_loss: 0.7910935282707214\n",
      "Step 207: BSNE_Loss=0.9648,local_loss: -0.0038637523426101497,global_loss: 0.968674898147583\n",
      "Step 208: BSNE_Loss=0.8656,local_loss: 0.13465776499303975,global_loss: 0.7309849858283997\n",
      "Step 209: BSNE_Loss=0.9946,local_loss: 0.14641191737745848,global_loss: 0.8481897711753845\n",
      "Step 210: BSNE_Loss=0.9954,local_loss: 0.17636788152255672,global_loss: 0.8190721869468689\n",
      "Step 211: BSNE_Loss=0.6755,local_loss: 0.10076353197014071,global_loss: 0.574762761592865\n",
      "Step 212: BSNE_Loss=0.7765,local_loss: 0.20997515310250423,global_loss: 0.5665035247802734\n",
      "Step 213: BSNE_Loss=1.0583,local_loss: 0.10662572075428418,global_loss: 0.9516708850860596\n",
      "Step 214: BSNE_Loss=1.1515,local_loss: 0.1746767737758338,global_loss: 0.976851224899292\n",
      "Step 215: BSNE_Loss=0.9045,local_loss: 0.169360420977635,global_loss: 0.7351281046867371\n",
      "Step 216: BSNE_Loss=0.7721,local_loss: 0.0795304066586777,global_loss: 0.6925826072692871\n",
      "Step 217: BSNE_Loss=0.5811,local_loss: 0.07717279327047762,global_loss: 0.5039223432540894\n",
      "Step 218: BSNE_Loss=0.6073,local_loss: 0.1024785496569567,global_loss: 0.5047804117202759\n",
      "Step 219: BSNE_Loss=0.9507,local_loss: 0.07720985563928717,global_loss: 0.8734416961669922\n",
      "Step 220: BSNE_Loss=1.0020,local_loss: 0.35074168201614103,global_loss: 0.6512523889541626\n",
      "Step 221: BSNE_Loss=0.8498,local_loss: 0.1257925704091968,global_loss: 0.724033534526825\n",
      "Step 222: BSNE_Loss=0.6088,local_loss: 0.2056717409925511,global_loss: 0.4031037986278534\n",
      "Step 223: BSNE_Loss=0.9213,local_loss: 0.06406254631255587,global_loss: 0.857284665107727\n",
      "Step 224: BSNE_Loss=0.6983,local_loss: 0.21342849427490207,global_loss: 0.4848679304122925\n",
      "Step 225: BSNE_Loss=0.8607,local_loss: 0.11080570673065135,global_loss: 0.749930202960968\n",
      "Step 226: BSNE_Loss=0.4255,local_loss: 0.30240212535522454,global_loss: 0.12306291610002518\n",
      "Step 227: BSNE_Loss=1.2270,local_loss: 0.19092103376416458,global_loss: 1.0360395908355713\n",
      "Step 228: BSNE_Loss=0.9433,local_loss: 0.17783605330501398,global_loss: 0.7654404044151306\n",
      "Step 229: BSNE_Loss=0.5430,local_loss: 0.08850644791307868,global_loss: 0.45452433824539185\n",
      "Step 230: BSNE_Loss=0.7336,local_loss: 0.011762185890443684,global_loss: 0.7218263149261475\n",
      "Step 231: BSNE_Loss=0.6617,local_loss: 0.14436293079797083,global_loss: 0.5173574686050415\n",
      "Step 232: BSNE_Loss=0.5694,local_loss: 0.18286107736405785,global_loss: 0.38652878999710083\n",
      "Step 233: BSNE_Loss=0.6199,local_loss: 0.2749449901661373,global_loss: 0.3449138104915619\n",
      "Step 234: BSNE_Loss=0.4407,local_loss: 0.1383959749473927,global_loss: 0.302314430475235\n",
      "Step 235: BSNE_Loss=0.3741,local_loss: 0.16012790343753155,global_loss: 0.21394014358520508\n",
      "Step 236: BSNE_Loss=0.4235,local_loss: 0.06908194036338626,global_loss: 0.35445666313171387\n",
      "Step 237: BSNE_Loss=0.5713,local_loss: 0.1913387203448729,global_loss: 0.3799501657485962\n",
      "Step 238: BSNE_Loss=0.5661,local_loss: 0.3065428051323709,global_loss: 0.25959888100624084\n",
      "Step 239: BSNE_Loss=0.5655,local_loss: 0.3977698235577924,global_loss: 0.1677798330783844\n",
      "Step 240: BSNE_Loss=0.4950,local_loss: 0.27814554730937624,global_loss: 0.21680474281311035\n",
      "Step 241: BSNE_Loss=0.7974,local_loss: 0.1785645150813997,global_loss: 0.61887526512146\n",
      "Step 242: BSNE_Loss=0.6965,local_loss: 0.15731998795608418,global_loss: 0.539135217666626\n",
      "Step 243: BSNE_Loss=0.5339,local_loss: 0.06781855152134669,global_loss: 0.466067373752594\n",
      "Step 244: BSNE_Loss=0.3202,local_loss: 0.1825353508635624,global_loss: 0.13770568370819092\n",
      "Step 245: BSNE_Loss=0.3583,local_loss: 0.20567450890814404,global_loss: 0.15267163515090942\n",
      "Step 246: BSNE_Loss=0.2098,local_loss: 0.5485338953123398,global_loss: -0.3387722373008728\n",
      "Step 247: BSNE_Loss=1.1750,local_loss: 0.7602297901954629,global_loss: 0.41474637389183044\n",
      "Step 248: BSNE_Loss=0.3166,local_loss: 0.23444127873397427,global_loss: 0.08213569968938828\n",
      "Step 249: BSNE_Loss=0.2934,local_loss: 0.23169755603106373,global_loss: 0.0616699755191803\n",
      "Step 250: BSNE_Loss=0.5564,local_loss: 0.48870778495742323,global_loss: 0.06772487610578537\n",
      "Step 251: BSNE_Loss=0.3659,local_loss: 0.20379918523355003,global_loss: 0.16214895248413086\n",
      "Step 252: BSNE_Loss=-0.1009,local_loss: 0.7952566478579155,global_loss: -0.8961529731750488\n",
      "Center node 2 finished at step 253 with loss -0.1009\n",
      "\n",
      "=== Pretraining Epoch 3 (Center Node: 3) ===\n",
      "Step 0: BSNE_Loss=1.4838,local_loss: 1.0141866215829254,global_loss: 0.4696160554885864\n",
      "Step 1: BSNE_Loss=0.8264,local_loss: 0.1808109767186258,global_loss: 0.6455422639846802\n",
      "Step 2: BSNE_Loss=0.5625,local_loss: 0.03748975068424446,global_loss: 0.5250281095504761\n",
      "Step 3: BSNE_Loss=1.0599,local_loss: 0.28643774566583247,global_loss: 0.7734578251838684\n",
      "Step 4: BSNE_Loss=1.0447,local_loss: 0.16912975144931644,global_loss: 0.8755312561988831\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 5: BSNE_Loss=0.6059,local_loss: 0.013418731317550012,global_loss: 0.5925118923187256\n",
      "Step 6: BSNE_Loss=0.7477,local_loss: 0.36864509713367855,global_loss: 0.37904056906700134\n",
      "Step 7: BSNE_Loss=0.9551,local_loss: 0.0572363060783985,global_loss: 0.8979034423828125\n",
      "Step 8: BSNE_Loss=1.1522,local_loss: 0.14241076260101024,global_loss: 1.0098053216934204\n",
      "Step 9: BSNE_Loss=0.7450,local_loss: 0.04906272187376164,global_loss: 0.6959501504898071\n",
      "Step 10: BSNE_Loss=0.8140,local_loss: 0.0447280913011471,global_loss: 0.7692532539367676\n",
      "Step 11: BSNE_Loss=1.2657,local_loss: 0.12423328445635592,global_loss: 1.141431450843811\n",
      "Step 12: BSNE_Loss=0.5654,local_loss: 0.00041893102116922874,global_loss: 0.5650127530097961\n",
      "Step 13: BSNE_Loss=1.1066,local_loss: 0.05408664798028411,global_loss: 1.052522897720337\n",
      "Step 14: BSNE_Loss=0.6623,local_loss: 0.2026283602831749,global_loss: 0.4596583843231201\n",
      "Step 15: BSNE_Loss=0.9487,local_loss: -0.0109564367196581,global_loss: 0.9596940279006958\n",
      "Step 16: BSNE_Loss=0.9730,local_loss: 0.09299387054782304,global_loss: 0.8800449371337891\n",
      "Step 17: BSNE_Loss=1.2879,local_loss: 0.19594194850947894,global_loss: 1.0919110774993896\n",
      "Step 18: BSNE_Loss=0.8081,local_loss: 0.09611143282171306,global_loss: 0.7120128870010376\n",
      "Step 19: BSNE_Loss=1.0944,local_loss: 0.0968548357064882,global_loss: 0.997573733329773\n",
      "Step 20: BSNE_Loss=1.0042,local_loss: 0.13350204275404237,global_loss: 0.8706538677215576\n",
      "Step 21: BSNE_Loss=0.6781,local_loss: 0.07816651866887792,global_loss: 0.599917471408844\n",
      "Step 22: BSNE_Loss=1.0488,local_loss: 0.2231000488693692,global_loss: 0.8256597518920898\n",
      "Step 23: BSNE_Loss=1.1229,local_loss: 0.27392579673858736,global_loss: 0.8490141034126282\n",
      "Step 24: BSNE_Loss=0.3139,local_loss: 0.05322103387996385,global_loss: 0.2606407105922699\n",
      "Step 25: BSNE_Loss=1.3035,local_loss: 0.23840360224927803,global_loss: 1.0651429891586304\n",
      "Step 26: BSNE_Loss=1.0889,local_loss: 0.1469417100359655,global_loss: 0.9419980049133301\n",
      "Step 27: BSNE_Loss=1.1191,local_loss: -0.027089710946849352,global_loss: 1.146165132522583\n",
      "Step 28: BSNE_Loss=1.2066,local_loss: 0.09736661048536473,global_loss: 1.1091859340667725\n",
      "Step 29: BSNE_Loss=1.3092,local_loss: 0.17944569610327185,global_loss: 1.129706621170044\n",
      "Step 30: BSNE_Loss=1.3518,local_loss: 0.07451020181184449,global_loss: 1.277297019958496\n",
      "Step 31: BSNE_Loss=0.8424,local_loss: 0.12879376505565776,global_loss: 0.7135939002037048\n",
      "Step 32: BSNE_Loss=1.1247,local_loss: 0.0990667791271368,global_loss: 1.025625467300415\n",
      "Step 33: BSNE_Loss=1.2705,local_loss: -0.011712709425545302,global_loss: 1.2822015285491943\n",
      "Step 34: BSNE_Loss=0.7584,local_loss: 0.238578588300472,global_loss: 0.5198234915733337\n",
      "Step 35: BSNE_Loss=0.6087,local_loss: 0.06691472417951788,global_loss: 0.5417582392692566\n",
      "Step 36: BSNE_Loss=1.0526,local_loss: 0.1600656267144391,global_loss: 0.8925467729568481\n",
      "Step 37: BSNE_Loss=0.6615,local_loss: 0.000496061519846105,global_loss: 0.6610166430473328\n",
      "Step 38: BSNE_Loss=0.5526,local_loss: 0.10770353493563262,global_loss: 0.44494083523750305\n",
      "Step 39: BSNE_Loss=0.9368,local_loss: 0.17519852116005338,global_loss: 0.7615734338760376\n",
      "Step 40: BSNE_Loss=1.0452,local_loss: 0.06642261212345943,global_loss: 0.9787492752075195\n",
      "Step 41: BSNE_Loss=0.9770,local_loss: 0.08330232653697824,global_loss: 0.8936870694160461\n",
      "Step 42: BSNE_Loss=0.9030,local_loss: 0.05485910463359853,global_loss: 0.8481138944625854\n",
      "Step 43: BSNE_Loss=0.6701,local_loss: 0.1218581208061524,global_loss: 0.5482397079467773\n",
      "Step 44: BSNE_Loss=1.1208,local_loss: 6.737712484010672e-05,global_loss: 1.120751976966858\n",
      "Step 45: BSNE_Loss=0.6786,local_loss: 0.06386704986255147,global_loss: 0.6147618293762207\n",
      "Step 46: BSNE_Loss=0.4371,local_loss: 0.048198694300656514,global_loss: 0.3889194130897522\n",
      "Step 47: BSNE_Loss=0.9349,local_loss: 0.09030999140730477,global_loss: 0.844591498374939\n",
      "Step 48: BSNE_Loss=1.2789,local_loss: 0.12610673528084113,global_loss: 1.1527541875839233\n",
      "Step 49: BSNE_Loss=0.9498,local_loss: 0.08962447628964956,global_loss: 0.8601648807525635\n",
      "Step 50: BSNE_Loss=0.9371,local_loss: 0.12031180866527813,global_loss: 0.8167577385902405\n",
      "Step 51: BSNE_Loss=1.0894,local_loss: 0.1744580334991233,global_loss: 0.9149096012115479\n",
      "Step 52: BSNE_Loss=1.4174,local_loss: 0.09690432340818457,global_loss: 1.3204840421676636\n",
      "Step 53: BSNE_Loss=0.4671,local_loss: 0.07399673458953562,global_loss: 0.3930785357952118\n",
      "Step 54: BSNE_Loss=0.8575,local_loss: 0.06946659003340294,global_loss: 0.7879943251609802\n",
      "Step 55: BSNE_Loss=1.3384,local_loss: 0.15865611632132942,global_loss: 1.1797842979431152\n",
      "Step 56: BSNE_Loss=0.6524,local_loss: 0.11687964132329924,global_loss: 0.5354891419410706\n",
      "Step 57: BSNE_Loss=0.9237,local_loss: 0.05249102464580098,global_loss: 0.8711655139923096\n",
      "Step 58: BSNE_Loss=0.9976,local_loss: 0.10242213795500096,global_loss: 0.8951455950737\n",
      "Step 59: BSNE_Loss=1.1562,local_loss: 0.08318101316249919,global_loss: 1.072993516921997\n",
      "Step 60: BSNE_Loss=0.9390,local_loss: 0.18185678338041936,global_loss: 0.75711989402771\n",
      "Step 61: BSNE_Loss=0.9260,local_loss: 0.14851740067175348,global_loss: 0.777529239654541\n",
      "Step 62: BSNE_Loss=1.3173,local_loss: 0.07347234194785277,global_loss: 1.2438031435012817\n",
      "Step 63: BSNE_Loss=0.5845,local_loss: 0.1926554553450002,global_loss: 0.39188307523727417\n",
      "Step 64: BSNE_Loss=1.1042,local_loss: 0.02217533597101992,global_loss: 1.082067608833313\n",
      "Step 65: BSNE_Loss=0.8651,local_loss: 0.154232727716893,global_loss: 0.7108761072158813\n",
      "Step 66: BSNE_Loss=0.6885,local_loss: 0.034744711047008894,global_loss: 0.6538012623786926\n",
      "Step 67: BSNE_Loss=0.6624,local_loss: 0.05639418000215665,global_loss: 0.6060301661491394\n",
      "Step 68: BSNE_Loss=0.5928,local_loss: 0.09881248210624918,global_loss: 0.4939650297164917\n",
      "Step 69: BSNE_Loss=0.7181,local_loss: 0.11591038867409402,global_loss: 0.6021732091903687\n",
      "Step 70: BSNE_Loss=0.7717,local_loss: 0.018577323773175225,global_loss: 0.7530763745307922\n",
      "Step 71: BSNE_Loss=0.9026,local_loss: 0.020862535800039144,global_loss: 0.8817694783210754\n",
      "Step 72: BSNE_Loss=0.7302,local_loss: 0.18338168948564615,global_loss: 0.5467690229415894\n",
      "Step 73: BSNE_Loss=0.7696,local_loss: -0.0013533272473338087,global_loss: 0.7709038853645325\n",
      "Step 74: BSNE_Loss=1.0907,local_loss: 0.11093673695438483,global_loss: 0.9797165393829346\n",
      "Step 75: BSNE_Loss=0.8892,local_loss: 0.04434401502177952,global_loss: 0.8448665738105774\n",
      "Step 76: BSNE_Loss=0.9533,local_loss: 0.14929927570493623,global_loss: 0.8039694428443909\n",
      "Step 77: BSNE_Loss=1.0085,local_loss: 0.10512991185085963,global_loss: 0.9034096002578735\n",
      "Step 78: BSNE_Loss=0.3971,local_loss: 0.061283800466563614,global_loss: 0.33578866720199585\n",
      "Step 79: BSNE_Loss=1.0890,local_loss: 0.15603873310966238,global_loss: 0.933002233505249\n",
      "Step 80: BSNE_Loss=0.6380,local_loss: -0.019008823418041273,global_loss: 0.6569924354553223\n",
      "Step 81: BSNE_Loss=0.7679,local_loss: 0.05426021565853514,global_loss: 0.7136513590812683\n",
      "Step 82: BSNE_Loss=0.9795,local_loss: 0.11545434947830797,global_loss: 0.8640445470809937\n",
      "Step 83: BSNE_Loss=1.0179,local_loss: 0.1009264353025925,global_loss: 0.9169871807098389\n",
      "Step 84: BSNE_Loss=0.9672,local_loss: 0.09014083113807651,global_loss: 0.8770216703414917\n",
      "Step 85: BSNE_Loss=0.5784,local_loss: 0.0821522314619832,global_loss: 0.49620985984802246\n",
      "Step 86: BSNE_Loss=0.6943,local_loss: 0.09276719595021705,global_loss: 0.6015084385871887\n",
      "Step 87: BSNE_Loss=1.0907,local_loss: 0.3180830510717101,global_loss: 0.7726229429244995\n",
      "Step 88: BSNE_Loss=0.8585,local_loss: 0.20830737478919226,global_loss: 0.6501895189285278\n",
      "Step 89: BSNE_Loss=0.8056,local_loss: 0.17367383967497677,global_loss: 0.631951630115509\n",
      "Step 90: BSNE_Loss=0.8214,local_loss: 0.2884299609726057,global_loss: 0.5330159068107605\n",
      "Step 91: BSNE_Loss=1.3096,local_loss: 0.12591250885671135,global_loss: 1.183728575706482\n",
      "Step 92: BSNE_Loss=0.8527,local_loss: 0.12028444932638238,global_loss: 0.7324166893959045\n",
      "Step 93: BSNE_Loss=1.2211,local_loss: 0.1707622559482542,global_loss: 1.0502921342849731\n",
      "Step 94: BSNE_Loss=0.9666,local_loss: 0.20114748879576952,global_loss: 0.7654141187667847\n",
      "Step 95: BSNE_Loss=0.7498,local_loss: 0.13861736472843325,global_loss: 0.6111373901367188\n",
      "Step 96: BSNE_Loss=1.3156,local_loss: 0.3701532373210236,global_loss: 0.9454489350318909\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 97: BSNE_Loss=0.7292,local_loss: 0.10400995493752724,global_loss: 0.6251636743545532\n",
      "Step 98: BSNE_Loss=0.8919,local_loss: 0.0331183881767579,global_loss: 0.8587558269500732\n",
      "Step 99: BSNE_Loss=1.0494,local_loss: 0.142801788582233,global_loss: 0.9066387414932251\n",
      "Step 100: BSNE_Loss=0.6929,local_loss: 0.2114681360917409,global_loss: 0.48141589760780334\n",
      "Step 101: BSNE_Loss=0.8115,local_loss: 0.3146782336454335,global_loss: 0.4967752993106842\n",
      "Step 102: BSNE_Loss=1.0424,local_loss: 0.12048353947630723,global_loss: 0.92193603515625\n",
      "Step 103: BSNE_Loss=0.6924,local_loss: 0.13194881367382927,global_loss: 0.5604453682899475\n",
      "Step 104: BSNE_Loss=0.8613,local_loss: 0.13989440995130636,global_loss: 0.7214425802230835\n",
      "Step 105: BSNE_Loss=0.8887,local_loss: 0.20230031198600157,global_loss: 0.6864269375801086\n",
      "Step 106: BSNE_Loss=1.1304,local_loss: 0.16535343455431303,global_loss: 0.9650436639785767\n",
      "Step 107: BSNE_Loss=0.6400,local_loss: 0.19150856292983115,global_loss: 0.44844353199005127\n",
      "Step 108: BSNE_Loss=0.8301,local_loss: 0.12741956615276748,global_loss: 0.7027037739753723\n",
      "Step 109: BSNE_Loss=1.2057,local_loss: 0.17903060885463945,global_loss: 1.026619553565979\n",
      "Step 110: BSNE_Loss=0.9997,local_loss: 0.20267676427535058,global_loss: 0.7970678806304932\n",
      "Step 111: BSNE_Loss=0.5054,local_loss: 0.14628045519422334,global_loss: 0.3591010570526123\n",
      "Step 112: BSNE_Loss=0.7992,local_loss: 0.18893692311243943,global_loss: 0.6103091835975647\n",
      "Step 113: BSNE_Loss=0.7704,local_loss: 0.2595751133386799,global_loss: 0.5108418464660645\n",
      "Step 114: BSNE_Loss=1.4309,local_loss: 0.12313159315682418,global_loss: 1.3077876567840576\n",
      "Step 115: BSNE_Loss=0.8167,local_loss: 0.1284225425796478,global_loss: 0.6883252859115601\n",
      "Step 116: BSNE_Loss=0.8055,local_loss: 0.09647104248904795,global_loss: 0.7089875936508179\n",
      "Step 117: BSNE_Loss=0.7966,local_loss: 0.15608566170614782,global_loss: 0.6405594944953918\n",
      "Step 118: BSNE_Loss=0.8149,local_loss: 0.2632005606517594,global_loss: 0.5517168045043945\n",
      "Step 119: BSNE_Loss=0.7656,local_loss: 0.2835815800064397,global_loss: 0.4820168614387512\n",
      "Step 120: BSNE_Loss=0.7994,local_loss: 0.17310739745115555,global_loss: 0.6262853741645813\n",
      "Step 121: BSNE_Loss=1.4481,local_loss: 0.21750819953421222,global_loss: 1.2306276559829712\n",
      "Step 122: BSNE_Loss=1.0502,local_loss: 0.14803706708691833,global_loss: 0.9021543264389038\n",
      "Step 123: BSNE_Loss=0.4273,local_loss: 0.08882077106105606,global_loss: 0.33846452832221985\n",
      "Step 124: BSNE_Loss=1.0511,local_loss: 0.16071461973943257,global_loss: 0.8903893232345581\n",
      "Step 125: BSNE_Loss=1.3882,local_loss: 0.26015655195596016,global_loss: 1.1280725002288818\n",
      "Step 126: BSNE_Loss=0.9601,local_loss: 0.18480009649832524,global_loss: 0.7752764225006104\n",
      "Step 127: BSNE_Loss=0.6622,local_loss: 0.20409938313146792,global_loss: 0.4580788016319275\n",
      "Step 128: BSNE_Loss=0.5863,local_loss: 0.025329935107607487,global_loss: 0.5610154271125793\n",
      "Step 129: BSNE_Loss=1.0206,local_loss: 0.1618102643411251,global_loss: 0.8587770462036133\n",
      "Step 130: BSNE_Loss=1.0783,local_loss: 0.09131073826296984,global_loss: 0.9869781732559204\n",
      "Step 131: BSNE_Loss=1.0921,local_loss: 0.21288761074615498,global_loss: 0.8791967630386353\n",
      "Step 132: BSNE_Loss=0.3860,local_loss: 0.16986055150272908,global_loss: 0.2161722630262375\n",
      "Step 133: BSNE_Loss=0.5142,local_loss: 0.21848982134761283,global_loss: 0.29572996497154236\n",
      "Step 134: BSNE_Loss=0.8350,local_loss: 0.23313014282644223,global_loss: 0.6018215417861938\n",
      "Step 135: BSNE_Loss=1.3135,local_loss: 0.2821914929284933,global_loss: 1.031267523765564\n",
      "Step 136: BSNE_Loss=0.7222,local_loss: 0.1149211347528631,global_loss: 0.6073182225227356\n",
      "Step 137: BSNE_Loss=1.1863,local_loss: 0.16038865497250648,global_loss: 1.0258822441101074\n",
      "Step 138: BSNE_Loss=0.6461,local_loss: 0.011231121291111883,global_loss: 0.6348903775215149\n",
      "Step 139: BSNE_Loss=0.8206,local_loss: 0.258491625043765,global_loss: 0.562078595161438\n",
      "Step 140: BSNE_Loss=0.8194,local_loss: 0.24953270453456441,global_loss: 0.5698966383934021\n",
      "Step 141: BSNE_Loss=0.7565,local_loss: 0.14140744324942053,global_loss: 0.6150849461555481\n",
      "Step 142: BSNE_Loss=0.8981,local_loss: 0.32764262174393977,global_loss: 0.5704852342605591\n",
      "Step 143: BSNE_Loss=0.6546,local_loss: 0.11331017373760238,global_loss: 0.5412437915802002\n",
      "Step 144: BSNE_Loss=0.8518,local_loss: 0.20651399896406947,global_loss: 0.6452534198760986\n",
      "Step 145: BSNE_Loss=1.0090,local_loss: 0.06696616490581846,global_loss: 0.9420560002326965\n",
      "Step 146: BSNE_Loss=1.2423,local_loss: 0.1753461778151141,global_loss: 1.0669046640396118\n",
      "Step 147: BSNE_Loss=0.7975,local_loss: -0.012117780930770544,global_loss: 0.8095700144767761\n",
      "Step 148: BSNE_Loss=1.2016,local_loss: 0.197529681702246,global_loss: 1.0040615797042847\n",
      "Step 149: BSNE_Loss=1.0845,local_loss: 0.08295268547379282,global_loss: 1.0015597343444824\n",
      "Step 150: BSNE_Loss=0.7364,local_loss: 0.0810280698064726,global_loss: 0.6554040908813477\n",
      "Step 151: BSNE_Loss=0.5505,local_loss: 0.11387975597003308,global_loss: 0.43664875626564026\n",
      "Step 152: BSNE_Loss=0.9193,local_loss: 0.026731854648732335,global_loss: 0.8925568461418152\n",
      "Step 153: BSNE_Loss=0.8178,local_loss: 0.18570032571027884,global_loss: 0.6321163177490234\n",
      "Step 154: BSNE_Loss=0.9157,local_loss: 0.02485826796536955,global_loss: 0.890831470489502\n",
      "Step 155: BSNE_Loss=0.9655,local_loss: 0.08389662552350817,global_loss: 0.8816444873809814\n",
      "Step 156: BSNE_Loss=0.7450,local_loss: 0.06456480407466188,global_loss: 0.6804695725440979\n",
      "Step 157: BSNE_Loss=0.4503,local_loss: 0.09767064519060212,global_loss: 0.35265201330184937\n",
      "Step 158: BSNE_Loss=0.4568,local_loss: 0.14165896741295933,global_loss: 0.3151147961616516\n",
      "Step 159: BSNE_Loss=0.6759,local_loss: 0.08764732189540597,global_loss: 0.5882230997085571\n",
      "Step 160: BSNE_Loss=0.8666,local_loss: 0.18044142175663172,global_loss: 0.6861552596092224\n",
      "Step 161: BSNE_Loss=0.7697,local_loss: 0.15858310757081703,global_loss: 0.6110891103744507\n",
      "Step 162: BSNE_Loss=0.9406,local_loss: 0.11017535105982887,global_loss: 0.8303850889205933\n",
      "Step 163: BSNE_Loss=0.4389,local_loss: 0.14394397880204327,global_loss: 0.294973224401474\n",
      "Step 164: BSNE_Loss=0.8119,local_loss: 0.0453213595990651,global_loss: 0.7665912508964539\n",
      "Step 165: BSNE_Loss=0.5287,local_loss: 0.0330337725339513,global_loss: 0.4956378936767578\n",
      "Step 166: BSNE_Loss=0.6099,local_loss: 0.022980442685632083,global_loss: 0.586890697479248\n",
      "Step 167: BSNE_Loss=1.4151,local_loss: 0.16141476603587068,global_loss: 1.2537283897399902\n",
      "Step 168: BSNE_Loss=0.9728,local_loss: 0.03275210259462316,global_loss: 0.9400510787963867\n",
      "Step 169: BSNE_Loss=0.9426,local_loss: 0.05934237035408739,global_loss: 0.8832100629806519\n",
      "Step 170: BSNE_Loss=0.5842,local_loss: 0.12368603331890853,global_loss: 0.46053409576416016\n",
      "Step 171: BSNE_Loss=0.8360,local_loss: -0.02758906282929493,global_loss: 0.8636322617530823\n",
      "Step 172: BSNE_Loss=0.4453,local_loss: 0.22665064055461337,global_loss: 0.21861480176448822\n",
      "Step 173: BSNE_Loss=0.9371,local_loss: 0.13158330608460797,global_loss: 0.8054844737052917\n",
      "Step 174: BSNE_Loss=0.5796,local_loss: -0.0008280937266966327,global_loss: 0.5804021954536438\n",
      "Step 175: BSNE_Loss=1.1469,local_loss: 0.12040556234815292,global_loss: 1.0264575481414795\n",
      "Step 176: BSNE_Loss=0.6724,local_loss: 0.07773405596490374,global_loss: 0.5946615934371948\n",
      "Step 177: BSNE_Loss=0.8288,local_loss: 0.1391810707999243,global_loss: 0.6896349191665649\n",
      "Step 178: BSNE_Loss=0.5308,local_loss: 0.17304702167827446,global_loss: 0.35777151584625244\n",
      "Step 179: BSNE_Loss=0.6279,local_loss: 0.1908392560303971,global_loss: 0.4370798170566559\n",
      "Step 180: BSNE_Loss=0.7896,local_loss: 0.15820624675356684,global_loss: 0.6313945651054382\n",
      "Step 181: BSNE_Loss=1.2125,local_loss: 0.1929479659786836,global_loss: 1.0195834636688232\n",
      "Step 182: BSNE_Loss=0.9277,local_loss: 0.26187895264424205,global_loss: 0.6657739281654358\n",
      "Step 183: BSNE_Loss=0.8947,local_loss: 0.15951409868571303,global_loss: 0.7352297306060791\n",
      "Step 184: BSNE_Loss=0.5303,local_loss: 0.08956860462176415,global_loss: 0.4407561719417572\n",
      "Step 185: BSNE_Loss=0.8583,local_loss: 0.203239299100528,global_loss: 0.6550953388214111\n",
      "Step 186: BSNE_Loss=0.6337,local_loss: 0.08594380448463813,global_loss: 0.5477263927459717\n",
      "Step 187: BSNE_Loss=1.0153,local_loss: 0.20880139957544377,global_loss: 0.806455671787262\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 188: BSNE_Loss=0.2901,local_loss: 0.1993209047100981,global_loss: 0.09077256917953491\n",
      "Step 189: BSNE_Loss=0.5547,local_loss: 0.1384941749439238,global_loss: 0.4162035584449768\n",
      "Step 190: BSNE_Loss=0.5198,local_loss: 0.1017927110890497,global_loss: 0.41804975271224976\n",
      "Step 191: BSNE_Loss=0.5536,local_loss: 0.13230867225549328,global_loss: 0.4213191568851471\n",
      "Step 192: BSNE_Loss=0.7519,local_loss: 0.11392337680121628,global_loss: 0.6379410028457642\n",
      "Step 193: BSNE_Loss=0.3015,local_loss: 0.20533902269900664,global_loss: 0.09611713886260986\n",
      "Step 194: BSNE_Loss=1.0714,local_loss: 0.17152936686111556,global_loss: 0.8998386859893799\n",
      "Step 195: BSNE_Loss=0.7465,local_loss: 0.09441689569670984,global_loss: 0.6520413756370544\n",
      "Step 196: BSNE_Loss=0.6732,local_loss: 0.20541686487085592,global_loss: 0.4678129553794861\n",
      "Step 197: BSNE_Loss=0.7512,local_loss: 0.09921833500115237,global_loss: 0.6519647836685181\n",
      "Step 198: BSNE_Loss=0.7399,local_loss: -0.022633037246777583,global_loss: 0.7625744342803955\n",
      "Step 199: BSNE_Loss=0.8837,local_loss: 0.27482275059756983,global_loss: 0.6088306903839111\n",
      "Step 200: BSNE_Loss=0.6867,local_loss: 0.11030090804140752,global_loss: 0.5763635039329529\n",
      "Step 201: BSNE_Loss=0.9691,local_loss: 0.07770022380023456,global_loss: 0.8913917541503906\n",
      "Step 202: BSNE_Loss=0.8166,local_loss: 0.11992904355859342,global_loss: 0.6966761946678162\n",
      "Step 203: BSNE_Loss=1.2951,local_loss: 0.10919937520371661,global_loss: 1.1858677864074707\n",
      "Step 204: BSNE_Loss=0.8288,local_loss: 0.0645881859057946,global_loss: 0.764258861541748\n",
      "Step 205: BSNE_Loss=0.5942,local_loss: 0.03957600807238475,global_loss: 0.5546566247940063\n",
      "Step 206: BSNE_Loss=0.8154,local_loss: 0.13812820377716925,global_loss: 0.6772404313087463\n",
      "Step 207: BSNE_Loss=0.8913,local_loss: 0.19391640262643614,global_loss: 0.6974012851715088\n",
      "Step 208: BSNE_Loss=0.6800,local_loss: 0.19891091838536298,global_loss: 0.481078565120697\n",
      "Step 209: BSNE_Loss=0.8006,local_loss: 0.134033805466103,global_loss: 0.6665809750556946\n",
      "Step 210: BSNE_Loss=0.6116,local_loss: 0.1642009328668424,global_loss: 0.4473952054977417\n",
      "Step 211: BSNE_Loss=0.8122,local_loss: 0.167113630505802,global_loss: 0.6450427174568176\n",
      "Step 212: BSNE_Loss=0.6593,local_loss: 0.2637699400833716,global_loss: 0.3955472409725189\n",
      "Step 213: BSNE_Loss=0.6162,local_loss: 0.14304010379993412,global_loss: 0.47314321994781494\n",
      "Step 214: BSNE_Loss=0.8648,local_loss: 0.22541946668924065,global_loss: 0.6393811702728271\n",
      "Step 215: BSNE_Loss=0.5421,local_loss: 0.2700651575066991,global_loss: 0.2720170021057129\n",
      "Step 216: BSNE_Loss=0.6630,local_loss: 0.07796660889824089,global_loss: 0.5850720405578613\n",
      "Step 217: BSNE_Loss=1.2636,local_loss: 0.3260013128246046,global_loss: 0.9376433491706848\n",
      "Step 218: BSNE_Loss=0.7932,local_loss: 0.24728334422266923,global_loss: 0.5459168553352356\n",
      "Step 219: BSNE_Loss=0.9317,local_loss: 0.22139631235675605,global_loss: 0.7103061676025391\n",
      "Step 220: BSNE_Loss=0.5941,local_loss: 0.016118605817391794,global_loss: 0.5780285596847534\n",
      "Step 221: BSNE_Loss=0.8781,local_loss: 0.18625075970822255,global_loss: 0.691817581653595\n",
      "Step 222: BSNE_Loss=0.8193,local_loss: 0.13017969440004332,global_loss: 0.6890942454338074\n",
      "Step 223: BSNE_Loss=1.0600,local_loss: 0.21898351710204408,global_loss: 0.8410204648971558\n",
      "Step 224: BSNE_Loss=0.6149,local_loss: 0.2543699235429072,global_loss: 0.36054614186286926\n",
      "Step 225: BSNE_Loss=0.8690,local_loss: 0.16695028742364243,global_loss: 0.7020754218101501\n",
      "Step 226: BSNE_Loss=0.6312,local_loss: 0.23236888378100848,global_loss: 0.3988415598869324\n",
      "Step 227: BSNE_Loss=0.6588,local_loss: 0.34016819277547766,global_loss: 0.3186272084712982\n",
      "Step 228: BSNE_Loss=0.8695,local_loss: 0.061040918549005736,global_loss: 0.8084321022033691\n",
      "Step 229: BSNE_Loss=0.4681,local_loss: 0.18525986959483517,global_loss: 0.2828645408153534\n",
      "Step 230: BSNE_Loss=0.9178,local_loss: 0.3197156680131698,global_loss: 0.5980877876281738\n",
      "Step 231: BSNE_Loss=0.5882,local_loss: 0.1960216030746018,global_loss: 0.3921906054019928\n",
      "Step 232: BSNE_Loss=1.2005,local_loss: 0.0731516581895823,global_loss: 1.1273505687713623\n",
      "Step 233: BSNE_Loss=0.9120,local_loss: 0.2957187441614245,global_loss: 0.6162359714508057\n",
      "Step 234: BSNE_Loss=0.3368,local_loss: -0.013312970754488519,global_loss: 0.35009732842445374\n",
      "Step 235: BSNE_Loss=1.2986,local_loss: 0.2542791248556785,global_loss: 1.0442771911621094\n",
      "Step 236: BSNE_Loss=0.9377,local_loss: 0.12823363814544472,global_loss: 0.8094953894615173\n",
      "Step 237: BSNE_Loss=0.6119,local_loss: 0.11882464040699751,global_loss: 0.49311408400535583\n",
      "Step 238: BSNE_Loss=0.6627,local_loss: 0.05303097061610318,global_loss: 0.6096779108047485\n",
      "Step 239: BSNE_Loss=0.3949,local_loss: 0.02127647011802715,global_loss: 0.3736092746257782\n",
      "Step 240: BSNE_Loss=0.9957,local_loss: 0.23457258913306892,global_loss: 0.7611253261566162\n",
      "Step 241: BSNE_Loss=1.0212,local_loss: 0.1784994426603192,global_loss: 0.8426727056503296\n",
      "Step 242: BSNE_Loss=0.3423,local_loss: 0.216700566054437,global_loss: 0.12561815977096558\n",
      "Step 243: BSNE_Loss=0.6848,local_loss: 0.1691212305147475,global_loss: 0.5156818628311157\n",
      "Step 244: BSNE_Loss=1.3414,local_loss: 0.06962548031593047,global_loss: 1.2717630863189697\n",
      "Step 245: BSNE_Loss=1.4557,local_loss: 0.05814374898397333,global_loss: 1.3975971937179565\n",
      "Step 246: BSNE_Loss=0.2571,local_loss: 0.14257580641333506,global_loss: 0.11455132812261581\n",
      "Step 247: BSNE_Loss=0.3716,local_loss: 0.18823177888150786,global_loss: 0.18339668214321136\n",
      "Step 248: BSNE_Loss=0.8193,local_loss: 0.09791057386028694,global_loss: 0.7213667631149292\n",
      "Step 249: BSNE_Loss=0.8006,local_loss: 0.1959632624035257,global_loss: 0.6046688556671143\n",
      "Step 250: BSNE_Loss=0.5054,local_loss: 0.12429098766386133,global_loss: 0.3811453580856323\n",
      "Step 251: BSNE_Loss=0.7163,local_loss: 0.015922145531114325,global_loss: 0.7004229426383972\n",
      "Step 252: BSNE_Loss=0.8425,local_loss: 0.15299153942726842,global_loss: 0.6895491480827332\n",
      "Step 253: BSNE_Loss=0.9070,local_loss: 0.22454201646821847,global_loss: 0.6824327111244202\n",
      "Step 254: BSNE_Loss=0.8552,local_loss: 0.3197618748229209,global_loss: 0.5354560017585754\n",
      "Step 255: BSNE_Loss=0.6678,local_loss: 0.0716310048939954,global_loss: 0.5961247086524963\n",
      "Step 256: BSNE_Loss=0.5065,local_loss: 0.09332372362388303,global_loss: 0.41321080923080444\n",
      "Step 257: BSNE_Loss=0.5243,local_loss: 0.02431512933679125,global_loss: 0.5000312328338623\n",
      "Step 258: BSNE_Loss=1.0286,local_loss: 0.10428054453415088,global_loss: 0.9242725372314453\n",
      "Step 259: BSNE_Loss=0.5917,local_loss: 0.14288485185377112,global_loss: 0.4488285779953003\n",
      "Step 260: BSNE_Loss=0.4337,local_loss: 0.05462611824749777,global_loss: 0.37910568714141846\n",
      "Step 261: BSNE_Loss=0.4983,local_loss: 0.2028854094998379,global_loss: 0.2953842878341675\n",
      "Step 262: BSNE_Loss=0.9039,local_loss: 0.1516058525262019,global_loss: 0.7522501349449158\n",
      "Step 263: BSNE_Loss=0.7565,local_loss: 0.0936420053723587,global_loss: 0.6628224849700928\n",
      "Step 264: BSNE_Loss=0.6057,local_loss: 0.09900756253764931,global_loss: 0.5066539645195007\n",
      "Step 265: BSNE_Loss=0.5452,local_loss: 0.10035638531596132,global_loss: 0.4448474645614624\n",
      "Step 266: BSNE_Loss=0.3873,local_loss: 0.06409750932425676,global_loss: 0.3232496976852417\n",
      "Step 267: BSNE_Loss=0.2198,local_loss: 0.11767071688992092,global_loss: 0.10216352343559265\n",
      "Step 268: BSNE_Loss=0.6689,local_loss: 0.19375426616140515,global_loss: 0.4751400351524353\n",
      "Step 269: BSNE_Loss=0.5951,local_loss: 0.08691675179094578,global_loss: 0.5081866383552551\n",
      "Step 270: BSNE_Loss=0.6572,local_loss: 0.09506900651489708,global_loss: 0.5621792674064636\n",
      "Step 271: BSNE_Loss=0.5157,local_loss: 0.043096192376463414,global_loss: 0.47264161705970764\n",
      "Step 272: BSNE_Loss=1.4769,local_loss: 0.16161143089693197,global_loss: 1.3152542114257812\n",
      "Step 273: BSNE_Loss=0.4109,local_loss: 0.15237854444337182,global_loss: 0.2585495114326477\n",
      "Step 274: BSNE_Loss=0.4850,local_loss: 0.12961497601381447,global_loss: 0.35536107420921326\n",
      "Step 275: BSNE_Loss=0.4293,local_loss: 0.12290620999819728,global_loss: 0.3064405620098114\n",
      "Step 276: BSNE_Loss=0.2194,local_loss: 0.2514234095082023,global_loss: -0.03197840601205826\n",
      "Step 277: BSNE_Loss=0.7683,local_loss: 0.19144246463260203,global_loss: 0.5768170356750488\n",
      "Step 278: BSNE_Loss=0.5757,local_loss: 0.030118994530056613,global_loss: 0.5456000566482544\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 279: BSNE_Loss=0.1243,local_loss: 0.06785378966479748,global_loss: 0.0564294271171093\n",
      "Center node 3 finished at step 280 with loss 0.1243\n",
      "\n",
      "=== Pretraining Epoch 4 (Center Node: 4) ===\n",
      "Step 0: BSNE_Loss=0.1568,local_loss: 0.6421220539467731,global_loss: -0.48536086082458496\n",
      "Center node 4 finished at step 1 with loss 0.1568\n",
      "\n",
      "=== Pretraining Epoch 5 (Center Node: 5) ===\n",
      "Step 0: BSNE_Loss=0.6835,local_loss: 0.44404940300035933,global_loss: 0.23941798508167267\n",
      "Step 1: BSNE_Loss=0.5293,local_loss: 0.3041656331521051,global_loss: 0.22513046860694885\n",
      "Step 2: BSNE_Loss=0.8820,local_loss: 0.2744484377351882,global_loss: 0.6075494885444641\n",
      "Step 3: BSNE_Loss=1.1293,local_loss: 0.3695324824694802,global_loss: 0.7598052620887756\n",
      "Step 4: BSNE_Loss=0.4650,local_loss: 0.24287372305858604,global_loss: 0.2221461832523346\n",
      "Step 5: BSNE_Loss=1.4051,local_loss: 0.18444558590451712,global_loss: 1.2206614017486572\n",
      "Step 6: BSNE_Loss=1.1492,local_loss: 0.3012924978132527,global_loss: 0.8479353189468384\n",
      "Step 7: BSNE_Loss=0.8314,local_loss: 0.1934130870829793,global_loss: 0.6379565596580505\n",
      "Step 8: BSNE_Loss=1.0896,local_loss: 0.2967385013009031,global_loss: 0.7928248643875122\n",
      "Step 9: BSNE_Loss=1.2578,local_loss: 0.2006516545397474,global_loss: 1.0571069717407227\n",
      "Step 10: BSNE_Loss=0.9986,local_loss: 0.3135281506089921,global_loss: 0.6850581765174866\n",
      "Step 11: BSNE_Loss=0.3604,local_loss: 0.1589529689628689,global_loss: 0.20148934423923492\n",
      "Step 12: BSNE_Loss=0.5388,local_loss: 0.16674944841550926,global_loss: 0.37207022309303284\n",
      "Step 13: BSNE_Loss=0.7085,local_loss: 0.2693972406074137,global_loss: 0.43907102942466736\n",
      "Step 14: BSNE_Loss=0.7906,local_loss: 0.21807424310751056,global_loss: 0.5725288391113281\n",
      "Step 15: BSNE_Loss=0.7203,local_loss: 0.16468037086041715,global_loss: 0.5555899143218994\n",
      "Step 16: BSNE_Loss=0.5866,local_loss: 0.10358599838266798,global_loss: 0.48305320739746094\n",
      "Step 17: BSNE_Loss=0.4662,local_loss: 0.07086081727245339,global_loss: 0.39537954330444336\n",
      "Step 18: BSNE_Loss=0.6905,local_loss: 0.2395129042733891,global_loss: 0.45097655057907104\n",
      "Step 19: BSNE_Loss=0.5434,local_loss: 0.05885018180788733,global_loss: 0.48452043533325195\n",
      "Step 20: BSNE_Loss=0.5004,local_loss: 0.24068217271950076,global_loss: 0.25971758365631104\n",
      "Step 21: BSNE_Loss=0.5867,local_loss: 0.16100506634119804,global_loss: 0.42572465538978577\n",
      "Step 22: BSNE_Loss=0.7591,local_loss: 0.2125868754869248,global_loss: 0.5465248823165894\n",
      "Step 23: BSNE_Loss=0.8356,local_loss: 0.14771166681007553,global_loss: 0.6879318356513977\n",
      "Step 24: BSNE_Loss=0.6942,local_loss: 0.11723309124595535,global_loss: 0.5770111680030823\n",
      "Step 25: BSNE_Loss=0.6488,local_loss: 0.21621255784996402,global_loss: 0.43256714940071106\n",
      "Step 26: BSNE_Loss=0.7469,local_loss: 0.08137265097718693,global_loss: 0.6655502319335938\n",
      "Step 27: BSNE_Loss=0.8525,local_loss: 0.23451257130785658,global_loss: 0.6179847121238708\n",
      "Step 28: BSNE_Loss=0.7818,local_loss: 0.1835103803610819,global_loss: 0.5982541441917419\n",
      "Step 29: BSNE_Loss=0.4886,local_loss: 0.08711034190734468,global_loss: 0.4015321433544159\n",
      "Step 30: BSNE_Loss=0.5062,local_loss: 0.26521301925700647,global_loss: 0.24097178876399994\n",
      "Step 31: BSNE_Loss=0.6537,local_loss: 0.24039151658859673,global_loss: 0.4132782518863678\n",
      "Step 32: BSNE_Loss=1.1284,local_loss: 0.19499570670810112,global_loss: 0.9334191083908081\n",
      "Step 33: BSNE_Loss=0.9095,local_loss: 0.16003279736041634,global_loss: 0.749442994594574\n",
      "Step 34: BSNE_Loss=0.7515,local_loss: 0.22667156928767554,global_loss: 0.5248724222183228\n",
      "Step 35: BSNE_Loss=0.9419,local_loss: 0.16303090215561766,global_loss: 0.7788565158843994\n",
      "Step 36: BSNE_Loss=1.0234,local_loss: 0.20501591407876696,global_loss: 0.8183727264404297\n",
      "Step 37: BSNE_Loss=0.8945,local_loss: 0.22999617591918045,global_loss: 0.664520263671875\n",
      "Step 38: BSNE_Loss=1.2218,local_loss: 0.1274089162028863,global_loss: 1.0944181680679321\n",
      "Step 39: BSNE_Loss=0.6550,local_loss: 0.1701766796844345,global_loss: 0.4848673343658447\n",
      "Step 40: BSNE_Loss=0.7734,local_loss: 0.1616412936776132,global_loss: 0.6117222309112549\n",
      "Step 41: BSNE_Loss=1.2633,local_loss: 0.19918591135940344,global_loss: 1.0641101598739624\n",
      "Step 42: BSNE_Loss=0.9643,local_loss: 0.07485077326119845,global_loss: 0.8894792795181274\n",
      "Step 43: BSNE_Loss=0.9972,local_loss: 0.2251895265193049,global_loss: 0.7719979286193848\n",
      "Step 44: BSNE_Loss=0.5392,local_loss: 0.0968679158415306,global_loss: 0.44235941767692566\n",
      "Step 45: BSNE_Loss=0.9428,local_loss: 0.2096430321074262,global_loss: 0.733116865158081\n",
      "Step 46: BSNE_Loss=0.9124,local_loss: 0.17073549109802025,global_loss: 0.7416583299636841\n",
      "Step 47: BSNE_Loss=1.0434,local_loss: 0.1534112541892996,global_loss: 0.8900355100631714\n",
      "Step 48: BSNE_Loss=0.7113,local_loss: 0.125930763715271,global_loss: 0.5853976011276245\n",
      "Step 49: BSNE_Loss=1.0486,local_loss: 0.2407541254821281,global_loss: 0.8078003525733948\n",
      "Step 50: BSNE_Loss=0.9624,local_loss: 0.04153113676894806,global_loss: 0.9209150075912476\n",
      "Step 51: BSNE_Loss=0.6605,local_loss: 0.21656252365298534,global_loss: 0.44395512342453003\n",
      "Step 52: BSNE_Loss=0.8514,local_loss: 0.1164183904409271,global_loss: 0.7349905967712402\n",
      "Step 53: BSNE_Loss=1.0331,local_loss: 0.2210659342551774,global_loss: 0.8120418787002563\n",
      "Step 54: BSNE_Loss=1.0231,local_loss: 0.12758097603499477,global_loss: 0.8955281376838684\n",
      "Step 55: BSNE_Loss=1.2328,local_loss: 0.3071496720119852,global_loss: 0.9256512522697449\n",
      "Step 56: BSNE_Loss=0.8804,local_loss: 0.1859649422432434,global_loss: 0.6944563388824463\n",
      "Step 57: BSNE_Loss=0.9704,local_loss: 0.05568836072620834,global_loss: 0.9147391319274902\n",
      "Step 58: BSNE_Loss=0.5155,local_loss: 0.09371085331386844,global_loss: 0.42182278633117676\n",
      "Step 59: BSNE_Loss=0.7981,local_loss: 0.1227007117174105,global_loss: 0.6754085421562195\n",
      "Step 60: BSNE_Loss=1.0735,local_loss: 0.16816444871612365,global_loss: 0.9053655862808228\n",
      "Step 61: BSNE_Loss=1.3109,local_loss: 0.1243411128325273,global_loss: 1.1865135431289673\n",
      "Step 62: BSNE_Loss=1.0302,local_loss: 0.11628846401804178,global_loss: 0.9138911962509155\n",
      "Step 63: BSNE_Loss=0.9273,local_loss: 0.04145244085543814,global_loss: 0.8858587741851807\n",
      "Step 64: BSNE_Loss=0.5921,local_loss: 0.14712960937138672,global_loss: 0.4449350833892822\n",
      "Step 65: BSNE_Loss=0.9972,local_loss: 0.12589844117169446,global_loss: 0.8713371157646179\n",
      "Step 66: BSNE_Loss=0.9012,local_loss: 0.13957063792664368,global_loss: 0.7616142630577087\n",
      "Step 67: BSNE_Loss=0.8928,local_loss: 0.02463787546430102,global_loss: 0.8681713342666626\n",
      "Step 68: BSNE_Loss=0.8678,local_loss: 0.20202178102062507,global_loss: 0.6657969951629639\n",
      "Step 69: BSNE_Loss=0.9438,local_loss: 0.09817045973859856,global_loss: 0.8456107378005981\n",
      "Step 70: BSNE_Loss=0.4432,local_loss: 0.1535073273179312,global_loss: 0.28971314430236816\n",
      "Step 71: BSNE_Loss=0.6638,local_loss: -0.05181196311703446,global_loss: 0.7155802845954895\n",
      "Step 72: BSNE_Loss=0.5613,local_loss: 0.06465556495049281,global_loss: 0.4966445565223694\n",
      "Step 73: BSNE_Loss=0.6874,local_loss: 0.1171818704501868,global_loss: 0.5702600479125977\n",
      "Step 74: BSNE_Loss=0.5912,local_loss: 0.14888964111184197,global_loss: 0.44234469532966614\n",
      "Step 75: BSNE_Loss=0.5386,local_loss: 0.09187044840889586,global_loss: 0.446688175201416\n",
      "Step 76: BSNE_Loss=0.9935,local_loss: -0.0027045430878563925,global_loss: 0.9961988925933838\n",
      "Step 77: BSNE_Loss=0.9752,local_loss: 0.18986361954369135,global_loss: 0.7853834629058838\n",
      "Step 78: BSNE_Loss=1.1510,local_loss: 0.06668261637362842,global_loss: 1.084333896636963\n",
      "Step 79: BSNE_Loss=0.8149,local_loss: 0.06740583045493581,global_loss: 0.7474921941757202\n",
      "Step 80: BSNE_Loss=1.1274,local_loss: 0.07067913752891981,global_loss: 1.0567195415496826\n",
      "Step 81: BSNE_Loss=0.4621,local_loss: 0.3678224888223865,global_loss: 0.09430139511823654\n",
      "Step 82: BSNE_Loss=0.6254,local_loss: 0.015185868803575725,global_loss: 0.6101816296577454\n",
      "Step 83: BSNE_Loss=0.6401,local_loss: 0.06630575045599656,global_loss: 0.5737454891204834\n",
      "Step 84: BSNE_Loss=0.5716,local_loss: 0.12483642561097781,global_loss: 0.4467819929122925\n",
      "Step 85: BSNE_Loss=0.6710,local_loss: 0.0780864534848316,global_loss: 0.592937707901001\n",
      "Step 86: BSNE_Loss=1.1060,local_loss: 0.17192500321569518,global_loss: 0.9341059327125549\n",
      "Step 87: BSNE_Loss=0.5842,local_loss: 0.0033495703385964417,global_loss: 0.5808497071266174\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 88: BSNE_Loss=0.7042,local_loss: 0.07073474547643872,global_loss: 0.6335086226463318\n",
      "Step 89: BSNE_Loss=0.8619,local_loss: 0.04409336577782714,global_loss: 0.8178209662437439\n",
      "Step 90: BSNE_Loss=0.7809,local_loss: 0.11177628970844669,global_loss: 0.6691005229949951\n",
      "Step 91: BSNE_Loss=0.5878,local_loss: 0.1907235075566002,global_loss: 0.3970305621623993\n",
      "Step 92: BSNE_Loss=1.0545,local_loss: 0.09140737799583479,global_loss: 0.9631297588348389\n",
      "Step 93: BSNE_Loss=1.0057,local_loss: 0.12234952529670017,global_loss: 0.8833375573158264\n",
      "Step 94: BSNE_Loss=0.2467,local_loss: -0.043331004646974355,global_loss: 0.29006874561309814\n",
      "Step 95: BSNE_Loss=0.6502,local_loss: -0.021867223192661153,global_loss: 0.6721089482307434\n",
      "Step 96: BSNE_Loss=1.2777,local_loss: 0.050620043726488824,global_loss: 1.2270786762237549\n",
      "Step 97: BSNE_Loss=0.2561,local_loss: 0.11850625395279366,global_loss: 0.13763411343097687\n",
      "Step 98: BSNE_Loss=0.7342,local_loss: 0.05640997617068227,global_loss: 0.6777912378311157\n",
      "Step 99: BSNE_Loss=0.5127,local_loss: 0.20468464205516337,global_loss: 0.30798980593681335\n",
      "Step 100: BSNE_Loss=1.0725,local_loss: 0.2604826057963727,global_loss: 0.8120108842849731\n",
      "Step 101: BSNE_Loss=0.5158,local_loss: 0.2856806406087341,global_loss: 0.23012766242027283\n",
      "Step 102: BSNE_Loss=0.5983,local_loss: 0.16704394455803048,global_loss: 0.4312567412853241\n",
      "Step 103: BSNE_Loss=0.2506,local_loss: 0.2824077439133465,global_loss: -0.031760264188051224\n",
      "Step 104: BSNE_Loss=0.3226,local_loss: 0.2608998179704532,global_loss: 0.06171603873372078\n",
      "Step 105: BSNE_Loss=0.5736,local_loss: 0.22406896841519314,global_loss: 0.34948766231536865\n",
      "Step 106: BSNE_Loss=1.3753,local_loss: 0.06913714397087213,global_loss: 1.3061672449111938\n",
      "Step 107: BSNE_Loss=0.4657,local_loss: 0.09014458640340063,global_loss: 0.3755589723587036\n",
      "Step 108: BSNE_Loss=0.9950,local_loss: 0.09669756618202786,global_loss: 0.8983203768730164\n",
      "Step 109: BSNE_Loss=0.5096,local_loss: 0.08383958651177048,global_loss: 0.42576804757118225\n",
      "Step 110: BSNE_Loss=1.0843,local_loss: 0.11956492961660388,global_loss: 0.9647260904312134\n",
      "Step 111: BSNE_Loss=0.1917,local_loss: 0.20206742350578885,global_loss: -0.01036043930798769\n",
      "Center node 5 finished at step 112 with loss 0.1917\n",
      "\n",
      "=== Pretraining Epoch 6 (Center Node: 6) ===\n",
      "Step 0: BSNE_Loss=0.7061,local_loss: 0.08906056595450754,global_loss: 0.6170560717582703\n",
      "Step 1: BSNE_Loss=1.0908,local_loss: 0.06194684342178503,global_loss: 1.0288561582565308\n",
      "Step 2: BSNE_Loss=0.8746,local_loss: 0.12658555937443203,global_loss: 0.7480396628379822\n",
      "Step 3: BSNE_Loss=0.5405,local_loss: 0.08593647731623037,global_loss: 0.45459866523742676\n",
      "Step 4: BSNE_Loss=0.8753,local_loss: 0.23310207696441573,global_loss: 0.6422279477119446\n",
      "Step 5: BSNE_Loss=0.7076,local_loss: 0.11949967825701024,global_loss: 0.5881271958351135\n",
      "Step 6: BSNE_Loss=0.6440,local_loss: 0.13811804450630377,global_loss: 0.5058998465538025\n",
      "Step 7: BSNE_Loss=0.9636,local_loss: 0.21686146037811077,global_loss: 0.7467808723449707\n",
      "Step 8: BSNE_Loss=0.8892,local_loss: 0.14709339650267503,global_loss: 0.7420756816864014\n",
      "Step 9: BSNE_Loss=0.8730,local_loss: 0.051282717702350364,global_loss: 0.8217554092407227\n",
      "Step 10: BSNE_Loss=0.7138,local_loss: 0.10529187155268317,global_loss: 0.6085299849510193\n",
      "Step 11: BSNE_Loss=0.6007,local_loss: 0.0804711616530663,global_loss: 0.5201983451843262\n",
      "Step 12: BSNE_Loss=1.2424,local_loss: 0.15054641161995913,global_loss: 1.0918207168579102\n",
      "Step 13: BSNE_Loss=0.5605,local_loss: 0.16350975326163808,global_loss: 0.3969411849975586\n",
      "Step 14: BSNE_Loss=0.6162,local_loss: 0.18060268507762864,global_loss: 0.43564465641975403\n",
      "Step 15: BSNE_Loss=0.7860,local_loss: 0.18871021895341578,global_loss: 0.5972449779510498\n",
      "Step 16: BSNE_Loss=-0.1190,local_loss: 0.15424243264929818,global_loss: -0.2732255160808563\n",
      "Center node 6 finished at step 17 with loss -0.1190\n",
      "\n",
      "=== Pretraining Epoch 7 (Center Node: 7) ===\n",
      "Step 0: BSNE_Loss=0.6597,local_loss: 0.07450345396437333,global_loss: 0.5852056741714478\n",
      "Step 1: BSNE_Loss=0.2167,local_loss: 0.11992609235329922,global_loss: 0.0967320054769516\n",
      "Step 2: BSNE_Loss=0.5339,local_loss: 0.3421851733760019,global_loss: 0.1916923075914383\n",
      "Step 3: BSNE_Loss=0.0763,local_loss: 0.10110810879733347,global_loss: -0.02482004091143608\n",
      "Center node 7 finished at step 4 with loss 0.0763\n",
      "\n",
      "=== Pretraining Epoch 8 (Center Node: 8) ===\n",
      "Step 0: BSNE_Loss=0.4110,local_loss: 0.15150793271312693,global_loss: 0.2595006823539734\n",
      "Step 1: BSNE_Loss=0.2675,local_loss: 0.1833682229092245,global_loss: 0.0841275230050087\n",
      "Step 2: BSNE_Loss=1.0042,local_loss: 0.07765984359166257,global_loss: 0.9265016913414001\n",
      "Step 3: BSNE_Loss=0.7478,local_loss: 0.2312487540068643,global_loss: 0.5165086984634399\n",
      "Step 4: BSNE_Loss=0.5350,local_loss: 0.026136506677484465,global_loss: 0.5088869333267212\n",
      "Step 5: BSNE_Loss=0.5179,local_loss: 0.2000111405084135,global_loss: 0.31785836815834045\n",
      "Step 6: BSNE_Loss=0.6146,local_loss: 0.1164194525416787,global_loss: 0.49817734956741333\n",
      "Step 7: BSNE_Loss=0.3832,local_loss: 0.1719518460770725,global_loss: 0.21126285195350647\n",
      "Step 8: BSNE_Loss=0.5146,local_loss: 0.1623421798968345,global_loss: 0.3522636294364929\n",
      "Step 9: BSNE_Loss=0.5761,local_loss: 0.017259453452472578,global_loss: 0.5588741898536682\n",
      "Step 10: BSNE_Loss=0.2571,local_loss: 0.004400753977905501,global_loss: 0.25270920991897583\n",
      "Step 11: BSNE_Loss=0.6003,local_loss: 0.016120719485023863,global_loss: 0.5842087864875793\n",
      "Step 12: BSNE_Loss=0.7438,local_loss: 0.15346056215750142,global_loss: 0.5903744697570801\n",
      "Step 13: BSNE_Loss=0.7894,local_loss: 0.19276548804681717,global_loss: 0.5966564416885376\n",
      "Step 14: BSNE_Loss=0.5334,local_loss: 0.05330100743600642,global_loss: 0.4801378846168518\n",
      "Step 15: BSNE_Loss=0.6553,local_loss: 0.06976992125159107,global_loss: 0.5854803919792175\n",
      "Step 16: BSNE_Loss=0.6100,local_loss: 0.07447752623731464,global_loss: 0.5355061292648315\n",
      "Step 17: BSNE_Loss=0.8884,local_loss: 0.06403797074343466,global_loss: 0.8243815898895264\n",
      "Step 18: BSNE_Loss=0.6711,local_loss: 0.05668508454947444,global_loss: 0.6144029498100281\n",
      "Step 19: BSNE_Loss=0.6973,local_loss: 0.16338919812918504,global_loss: 0.5339203476905823\n",
      "Step 20: BSNE_Loss=0.6962,local_loss: 0.17032081586210127,global_loss: 0.5258635878562927\n",
      "Step 21: BSNE_Loss=0.7747,local_loss: 0.24450263421239085,global_loss: 0.5302311182022095\n",
      "Step 22: BSNE_Loss=0.7807,local_loss: 0.12842710172513175,global_loss: 0.6522424221038818\n",
      "Step 23: BSNE_Loss=0.4662,local_loss: 0.23187260161017634,global_loss: 0.2343500852584839\n",
      "Step 24: BSNE_Loss=0.8474,local_loss: 0.12170776397423762,global_loss: 0.7257102727890015\n",
      "Step 25: BSNE_Loss=1.1287,local_loss: 0.28558711093437594,global_loss: 0.8431236743927002\n",
      "Step 26: BSNE_Loss=0.7328,local_loss: 0.11777326629759212,global_loss: 0.6150420308113098\n",
      "Step 27: BSNE_Loss=0.4295,local_loss: 0.05153120337918473,global_loss: 0.37794750928878784\n",
      "Step 28: BSNE_Loss=0.2016,local_loss: 0.170257942995928,global_loss: 0.031391873955726624\n",
      "Step 29: BSNE_Loss=0.7850,local_loss: 0.16773414797830988,global_loss: 0.6172255873680115\n",
      "Step 30: BSNE_Loss=1.0032,local_loss: 0.08748250783813993,global_loss: 0.9157493710517883\n",
      "Step 31: BSNE_Loss=0.3573,local_loss: 0.08912540561332379,global_loss: 0.2681959569454193\n",
      "Step 32: BSNE_Loss=0.7563,local_loss: 0.28041139979056207,global_loss: 0.4759131669998169\n",
      "Step 33: BSNE_Loss=0.6254,local_loss: 0.16264011658095903,global_loss: 0.4627308249473572\n",
      "Step 34: BSNE_Loss=0.7809,local_loss: 0.012553932228634981,global_loss: 0.7683470249176025\n",
      "Step 35: BSNE_Loss=0.3987,local_loss: 0.06982523985031569,global_loss: 0.3289194405078888\n",
      "Step 36: BSNE_Loss=0.8957,local_loss: 0.10969058931803784,global_loss: 0.7860056161880493\n",
      "Step 37: BSNE_Loss=0.4526,local_loss: 0.20431553664925672,global_loss: 0.24827010929584503\n",
      "Step 38: BSNE_Loss=0.5548,local_loss: 0.16823291307350283,global_loss: 0.3865608870983124\n",
      "Step 39: BSNE_Loss=0.5262,local_loss: 0.13945344190083048,global_loss: 0.3867420256137848\n",
      "Step 40: BSNE_Loss=0.5520,local_loss: 0.07830784397462129,global_loss: 0.47367480397224426\n",
      "Step 41: BSNE_Loss=0.3088,local_loss: 0.15338238547886546,global_loss: 0.15537023544311523\n",
      "Step 42: BSNE_Loss=0.4858,local_loss: 0.1031450841453499,global_loss: 0.3826948404312134\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 43: BSNE_Loss=0.7935,local_loss: 0.19754061509706014,global_loss: 0.5959606766700745\n",
      "Step 44: BSNE_Loss=0.9528,local_loss: 0.28918846234491535,global_loss: 0.6635753512382507\n",
      "Step 45: BSNE_Loss=0.7816,local_loss: 0.1649765604864693,global_loss: 0.6166073679924011\n",
      "Step 46: BSNE_Loss=0.5706,local_loss: 0.11026271909581883,global_loss: 0.4603312611579895\n",
      "Step 47: BSNE_Loss=0.7372,local_loss: 0.11723526256013626,global_loss: 0.619970977306366\n",
      "Step 48: BSNE_Loss=0.4015,local_loss: 0.14238549765903152,global_loss: 0.2591639459133148\n",
      "Step 49: BSNE_Loss=0.5880,local_loss: 0.13330329732107146,global_loss: 0.4546765089035034\n",
      "Step 50: BSNE_Loss=0.1708,local_loss: 0.025836665729224075,global_loss: 0.14494588971138\n",
      "Center node 8 finished at step 51 with loss 0.1708\n",
      "\n",
      "=== Pretraining Epoch 9 (Center Node: 9) ===\n",
      "Step 0: BSNE_Loss=0.7627,local_loss: 0.08451893647952648,global_loss: 0.6782124638557434\n",
      "Step 1: BSNE_Loss=0.9188,local_loss: 0.16119406133469444,global_loss: 0.7575870752334595\n",
      "Step 2: BSNE_Loss=1.0519,local_loss: 0.07520979837744737,global_loss: 0.9767374396324158\n",
      "Step 3: BSNE_Loss=0.9447,local_loss: -0.034636563235730736,global_loss: 0.9793379306793213\n",
      "Step 4: BSNE_Loss=0.6087,local_loss: 0.009515396513135342,global_loss: 0.5991910099983215\n",
      "Step 5: BSNE_Loss=0.6440,local_loss: 0.09304440157750815,global_loss: 0.5509723424911499\n",
      "Step 6: BSNE_Loss=0.8770,local_loss: 0.1552416895884096,global_loss: 0.721716582775116\n",
      "Step 7: BSNE_Loss=0.8738,local_loss: 0.3669398423828649,global_loss: 0.5068663954734802\n",
      "Step 8: BSNE_Loss=0.6462,local_loss: -0.03010318606027811,global_loss: 0.6762938499450684\n",
      "Step 9: BSNE_Loss=0.6173,local_loss: 0.05262761630324618,global_loss: 0.5646697878837585\n",
      "Step 10: BSNE_Loss=0.7737,local_loss: 0.013324981186187586,global_loss: 0.7603362798690796\n",
      "Step 11: BSNE_Loss=0.8899,local_loss: 0.1079749022446423,global_loss: 0.7819116711616516\n",
      "Step 12: BSNE_Loss=0.9960,local_loss: 0.13426489126702718,global_loss: 0.8617085218429565\n",
      "Step 13: BSNE_Loss=0.5733,local_loss: 0.05354715972100826,global_loss: 0.5197293162345886\n",
      "Step 14: BSNE_Loss=0.8946,local_loss: 0.11613896501742861,global_loss: 0.778459906578064\n",
      "Step 15: BSNE_Loss=0.9111,local_loss: 0.08672692050899145,global_loss: 0.8243625164031982\n",
      "Step 16: BSNE_Loss=0.6890,local_loss: 0.05833471856931035,global_loss: 0.630702793598175\n",
      "Step 17: BSNE_Loss=0.3668,local_loss: 0.15767751521615955,global_loss: 0.20909759402275085\n",
      "Step 18: BSNE_Loss=0.4258,local_loss: 0.04802319219049611,global_loss: 0.3778095245361328\n",
      "Step 19: BSNE_Loss=0.8386,local_loss: 0.10560446227557282,global_loss: 0.733010470867157\n",
      "Step 20: BSNE_Loss=0.8466,local_loss: 0.05142136489541794,global_loss: 0.7952211499214172\n",
      "Step 21: BSNE_Loss=0.6011,local_loss: 0.06391020782454092,global_loss: 0.5371780395507812\n",
      "Step 22: BSNE_Loss=0.7206,local_loss: 0.19579930807277243,global_loss: 0.5248318910598755\n",
      "Step 23: BSNE_Loss=0.8845,local_loss: 0.0333563594593382,global_loss: 0.8511214256286621\n",
      "Step 24: BSNE_Loss=0.7852,local_loss: 0.011065214323821024,global_loss: 0.7741289138793945\n",
      "Step 25: BSNE_Loss=1.1442,local_loss: 0.22648820032846184,global_loss: 0.9176905155181885\n",
      "Step 26: BSNE_Loss=1.5197,local_loss: 0.15766167925028066,global_loss: 1.3620703220367432\n",
      "Step 27: BSNE_Loss=0.9126,local_loss: 0.12266234260370348,global_loss: 0.7899060249328613\n",
      "Step 28: BSNE_Loss=0.3621,local_loss: 0.011800572654458358,global_loss: 0.3502766489982605\n",
      "Step 29: BSNE_Loss=0.7325,local_loss: 0.0638955971483244,global_loss: 0.6685616374015808\n",
      "Step 30: BSNE_Loss=0.6229,local_loss: 0.0882206655795715,global_loss: 0.5347135663032532\n",
      "Step 31: BSNE_Loss=0.3698,local_loss: 0.11419933028495485,global_loss: 0.2556003928184509\n",
      "Step 32: BSNE_Loss=0.7959,local_loss: 0.19665512291369458,global_loss: 0.5992006063461304\n",
      "Step 33: BSNE_Loss=0.2269,local_loss: 0.07861233052890536,global_loss: 0.14829377830028534\n",
      "Step 34: BSNE_Loss=0.6203,local_loss: 0.12013100011304008,global_loss: 0.5001821517944336\n",
      "Step 35: BSNE_Loss=0.7780,local_loss: 0.09324729635870864,global_loss: 0.6847707033157349\n",
      "Step 36: BSNE_Loss=0.8329,local_loss: 0.02269918831989113,global_loss: 0.8102020025253296\n",
      "Step 37: BSNE_Loss=0.5148,local_loss: 0.11676465163435737,global_loss: 0.3980635106563568\n",
      "Step 38: BSNE_Loss=0.8022,local_loss: 0.07623476948482871,global_loss: 0.7259689569473267\n",
      "Step 39: BSNE_Loss=0.6390,local_loss: 0.07675017389373383,global_loss: 0.5622338056564331\n",
      "Step 40: BSNE_Loss=0.9515,local_loss: 0.12528281792813326,global_loss: 0.8262385725975037\n",
      "Step 41: BSNE_Loss=0.4339,local_loss: 0.11604771978996524,global_loss: 0.3178928792476654\n",
      "Step 42: BSNE_Loss=0.9425,local_loss: 0.04760616732912322,global_loss: 0.8949136734008789\n",
      "Step 43: BSNE_Loss=0.8172,local_loss: 0.0988374490872638,global_loss: 0.7183718085289001\n",
      "Step 44: BSNE_Loss=1.1512,local_loss: 0.08891753651907358,global_loss: 1.0623189210891724\n",
      "Step 45: BSNE_Loss=0.7610,local_loss: 0.12126655653345354,global_loss: 0.6397536396980286\n",
      "Step 46: BSNE_Loss=0.8657,local_loss: 0.016763636075901173,global_loss: 0.848900556564331\n",
      "Step 47: BSNE_Loss=0.4994,local_loss: 0.009335219003867827,global_loss: 0.4900732636451721\n",
      "Step 48: BSNE_Loss=0.7534,local_loss: 0.047590166313086046,global_loss: 0.7058088183403015\n",
      "Step 49: BSNE_Loss=0.9296,local_loss: 0.07892309949165607,global_loss: 0.8506873846054077\n",
      "Step 50: BSNE_Loss=0.6303,local_loss: 0.039056592276825126,global_loss: 0.5912537574768066\n",
      "Step 51: BSNE_Loss=0.9365,local_loss: 0.038437022803375735,global_loss: 0.8981029391288757\n",
      "Step 52: BSNE_Loss=0.4842,local_loss: 0.09275915623948042,global_loss: 0.3914632797241211\n",
      "Step 53: BSNE_Loss=0.3411,local_loss: 0.1260550618796243,global_loss: 0.21503207087516785\n",
      "Step 54: BSNE_Loss=0.8593,local_loss: 0.02660892508039144,global_loss: 0.8327097296714783\n",
      "Step 55: BSNE_Loss=0.5355,local_loss: 0.034943951047179345,global_loss: 0.5005965232849121\n",
      "Step 56: BSNE_Loss=1.2899,local_loss: 0.053282560050582295,global_loss: 1.2366650104522705\n",
      "Step 57: BSNE_Loss=0.5811,local_loss: 0.056253867137110516,global_loss: 0.5248221755027771\n",
      "Step 58: BSNE_Loss=0.5914,local_loss: 0.10134823006841685,global_loss: 0.490042507648468\n",
      "Step 59: BSNE_Loss=1.2640,local_loss: 0.014228243328533008,global_loss: 1.2497726678848267\n",
      "Step 60: BSNE_Loss=0.2844,local_loss: -0.029392789441892955,global_loss: 0.31383684277534485\n",
      "Step 61: BSNE_Loss=0.7469,local_loss: 0.10322936924542848,global_loss: 0.6436748504638672\n",
      "Step 62: BSNE_Loss=0.7302,local_loss: 0.007143979316540126,global_loss: 0.7230260372161865\n",
      "Step 63: BSNE_Loss=0.9840,local_loss: 0.08662287275260891,global_loss: 0.8973804116249084\n",
      "Step 64: BSNE_Loss=0.8345,local_loss: 0.16553048813985896,global_loss: 0.6690107583999634\n",
      "Step 65: BSNE_Loss=0.6226,local_loss: 0.026521808815073513,global_loss: 0.5961125493049622\n",
      "Step 66: BSNE_Loss=0.6605,local_loss: -0.0353895532122403,global_loss: 0.6959393620491028\n",
      "Step 67: BSNE_Loss=0.4868,local_loss: -0.0525262287208081,global_loss: 0.5393701791763306\n",
      "Step 68: BSNE_Loss=0.5494,local_loss: 0.2800746525914965,global_loss: 0.2692950963973999\n",
      "Step 69: BSNE_Loss=0.9448,local_loss: 0.04021214383431237,global_loss: 0.9045941829681396\n",
      "Step 70: BSNE_Loss=0.5611,local_loss: 0.1043431993699792,global_loss: 0.4567070007324219\n",
      "Step 71: BSNE_Loss=0.8191,local_loss: 0.026733540743195103,global_loss: 0.7923166751861572\n",
      "Step 72: BSNE_Loss=0.7139,local_loss: 0.11726097133736815,global_loss: 0.596625804901123\n",
      "Step 73: BSNE_Loss=1.2553,local_loss: 0.023020602594687693,global_loss: 1.232267141342163\n",
      "Step 74: BSNE_Loss=1.0656,local_loss: 0.06918463556083368,global_loss: 0.9964389801025391\n",
      "Step 75: BSNE_Loss=0.7664,local_loss: 0.12174065239760783,global_loss: 0.6446292996406555\n",
      "Step 76: BSNE_Loss=0.0669,local_loss: -0.03369050276802193,global_loss: 0.10061899572610855\n",
      "Center node 9 finished at step 77 with loss 0.0669\n",
      "\n",
      "=== Pretraining Epoch 10 (Center Node: 10) ===\n",
      "Step 0: BSNE_Loss=0.6492,local_loss: 0.5015139227289513,global_loss: 0.1476469784975052\n",
      "Step 1: BSNE_Loss=0.3342,local_loss: 0.49798757939589244,global_loss: -0.16381624341011047\n",
      "Step 2: BSNE_Loss=0.4947,local_loss: 0.30620550223630805,global_loss: 0.1884903907775879\n",
      "Step 3: BSNE_Loss=0.6277,local_loss: 0.43352325006417436,global_loss: 0.19421905279159546\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 4: BSNE_Loss=0.6518,local_loss: 0.21662202749674275,global_loss: 0.4352145195007324\n",
      "Step 5: BSNE_Loss=0.5701,local_loss: 0.3057996807713003,global_loss: 0.2642953097820282\n",
      "Step 6: BSNE_Loss=0.9141,local_loss: 0.31492341194491574,global_loss: 0.5991714000701904\n",
      "Step 7: BSNE_Loss=0.7479,local_loss: 0.3344660787279243,global_loss: 0.41348230838775635\n",
      "Step 8: BSNE_Loss=0.4190,local_loss: 0.25261211387703403,global_loss: 0.16639403998851776\n",
      "Step 9: BSNE_Loss=0.3420,local_loss: 0.3181599422565892,global_loss: 0.02379561774432659\n",
      "Step 10: BSNE_Loss=0.1726,local_loss: 0.23660417870441006,global_loss: -0.06398465484380722\n",
      "Center node 10 finished at step 11 with loss 0.1726\n",
      "\n",
      "=== Pretraining Epoch 11 (Center Node: 11) ===\n",
      "Step 0: BSNE_Loss=0.5870,local_loss: 0.21366880073584552,global_loss: 0.3733155131340027\n",
      "Step 1: BSNE_Loss=0.5872,local_loss: 0.18381326964131398,global_loss: 0.40343379974365234\n",
      "Step 2: BSNE_Loss=0.8222,local_loss: 0.17837455606497604,global_loss: 0.6437790393829346\n",
      "Step 3: BSNE_Loss=0.7009,local_loss: 0.2060777549423552,global_loss: 0.49477893114089966\n",
      "Step 4: BSNE_Loss=0.8096,local_loss: 0.28948168328460694,global_loss: 0.520114004611969\n",
      "Step 5: BSNE_Loss=0.8557,local_loss: 0.14254640946891267,global_loss: 0.7131667733192444\n",
      "Step 6: BSNE_Loss=0.5865,local_loss: 0.16465903051219832,global_loss: 0.4218863546848297\n",
      "Step 7: BSNE_Loss=0.7027,local_loss: 0.2177194215264517,global_loss: 0.48497772216796875\n",
      "Step 8: BSNE_Loss=0.6157,local_loss: 0.11688232401109262,global_loss: 0.4987964630126953\n",
      "Step 9: BSNE_Loss=0.4076,local_loss: 0.1735111818123785,global_loss: 0.2340652495622635\n",
      "Step 10: BSNE_Loss=0.8133,local_loss: 0.3541700498940503,global_loss: 0.4591634273529053\n",
      "Step 11: BSNE_Loss=0.5793,local_loss: 0.1626392297638084,global_loss: 0.41668736934661865\n",
      "Step 12: BSNE_Loss=0.8749,local_loss: 0.20473590440401157,global_loss: 0.6701400876045227\n",
      "Step 13: BSNE_Loss=0.9340,local_loss: 0.27997764690289484,global_loss: 0.654055655002594\n",
      "Step 14: BSNE_Loss=0.6563,local_loss: 0.051364130273727646,global_loss: 0.6049366593360901\n",
      "Step 15: BSNE_Loss=1.0454,local_loss: 0.23109202785402477,global_loss: 0.8143520951271057\n",
      "Step 16: BSNE_Loss=0.6555,local_loss: 0.2025735458474385,global_loss: 0.4529636800289154\n",
      "Step 17: BSNE_Loss=0.6431,local_loss: 0.1877093668187591,global_loss: 0.455386221408844\n",
      "Step 18: BSNE_Loss=0.3742,local_loss: 0.14486361123412375,global_loss: 0.22933675348758698\n",
      "Step 19: BSNE_Loss=0.6092,local_loss: 0.22952766828075244,global_loss: 0.3796328902244568\n",
      "Step 20: BSNE_Loss=0.1669,local_loss: 0.1626543323761471,global_loss: 0.004290067590773106\n",
      "Center node 11 finished at step 21 with loss 0.1669\n",
      "\n",
      "=== Pretraining Epoch 12 (Center Node: 12) ===\n",
      "Step 0: BSNE_Loss=0.5172,local_loss: 0.12174251276687727,global_loss: 0.3954443335533142\n",
      "Step 1: BSNE_Loss=0.6379,local_loss: 0.08721221270506621,global_loss: 0.5506696105003357\n",
      "Step 2: BSNE_Loss=0.5997,local_loss: 0.15260312507059326,global_loss: 0.4471450448036194\n",
      "Step 3: BSNE_Loss=0.4936,local_loss: 0.08211338389174491,global_loss: 0.41153308749198914\n",
      "Step 4: BSNE_Loss=0.4074,local_loss: 0.23064141307572036,global_loss: 0.17678342759609222\n",
      "Step 5: BSNE_Loss=0.5159,local_loss: 0.17275095684315733,global_loss: 0.3431601822376251\n",
      "Step 6: BSNE_Loss=-0.1945,local_loss: 0.19151122982499458,global_loss: -0.38597771525382996\n",
      "Center node 12 finished at step 7 with loss -0.1945\n",
      "\n",
      "=== Pretraining Epoch 13 (Center Node: 13) ===\n",
      "Step 0: BSNE_Loss=0.8888,local_loss: 0.16197364275808593,global_loss: 0.7268491983413696\n",
      "Step 1: BSNE_Loss=0.7131,local_loss: 0.10365639654429383,global_loss: 0.6094807386398315\n",
      "Step 2: BSNE_Loss=0.7091,local_loss: 0.2422949101112078,global_loss: 0.46676260232925415\n",
      "Step 3: BSNE_Loss=0.8283,local_loss: 0.18546506278023672,global_loss: 0.6428716778755188\n",
      "Step 4: BSNE_Loss=0.9725,local_loss: 0.14856089272813136,global_loss: 0.8239595890045166\n",
      "Step 5: BSNE_Loss=0.9342,local_loss: 0.14720815525770242,global_loss: 0.7869834303855896\n",
      "Step 6: BSNE_Loss=1.0117,local_loss: 0.16126898188018512,global_loss: 0.8503811359405518\n",
      "Step 7: BSNE_Loss=0.5917,local_loss: 0.08650020740928895,global_loss: 0.505194365978241\n",
      "Step 8: BSNE_Loss=0.8652,local_loss: 0.16910616053381078,global_loss: 0.6961159706115723\n",
      "Step 9: BSNE_Loss=0.6437,local_loss: 0.32998674223090535,global_loss: 0.3137253522872925\n",
      "Step 10: BSNE_Loss=1.1391,local_loss: 0.1284925797939281,global_loss: 1.0105714797973633\n",
      "Step 11: BSNE_Loss=0.5607,local_loss: 0.16664254479766122,global_loss: 0.39409491419792175\n",
      "Step 12: BSNE_Loss=1.0370,local_loss: 0.23580185392920158,global_loss: 0.801217794418335\n",
      "Step 13: BSNE_Loss=0.6853,local_loss: 0.1046450139946657,global_loss: 0.5806677341461182\n",
      "Step 14: BSNE_Loss=1.0663,local_loss: 0.22957200406993308,global_loss: 0.8367569446563721\n",
      "Step 15: BSNE_Loss=0.7071,local_loss: 0.155123748081968,global_loss: 0.5519473552703857\n",
      "Step 16: BSNE_Loss=0.8610,local_loss: 0.18810671649575392,global_loss: 0.6729260683059692\n",
      "Step 17: BSNE_Loss=0.6324,local_loss: 0.07424186864634769,global_loss: 0.5581955909729004\n",
      "Step 18: BSNE_Loss=0.7864,local_loss: 0.17383212953381075,global_loss: 0.6126174926757812\n",
      "Step 19: BSNE_Loss=0.8280,local_loss: 0.11083189357405299,global_loss: 0.7171735167503357\n",
      "Step 20: BSNE_Loss=0.7255,local_loss: 0.13234284413428285,global_loss: 0.5931780338287354\n",
      "Step 21: BSNE_Loss=0.7685,local_loss: 0.14313968960481296,global_loss: 0.6253345608711243\n",
      "Step 22: BSNE_Loss=0.9323,local_loss: 0.12275692613626177,global_loss: 0.8095238208770752\n",
      "Step 23: BSNE_Loss=0.8342,local_loss: 0.20612780128176794,global_loss: 0.6280643343925476\n",
      "Step 24: BSNE_Loss=0.8970,local_loss: 0.26200862998145324,global_loss: 0.6349586248397827\n",
      "Step 25: BSNE_Loss=0.9059,local_loss: 0.07087753151954948,global_loss: 0.8350439071655273\n",
      "Step 26: BSNE_Loss=0.6823,local_loss: 0.23247695418161357,global_loss: 0.44981884956359863\n",
      "Step 27: BSNE_Loss=0.2945,local_loss: 0.25427049592634876,global_loss: 0.04018019512295723\n",
      "Step 28: BSNE_Loss=1.0175,local_loss: 0.2130573518709383,global_loss: 0.8044342398643494\n",
      "Step 29: BSNE_Loss=0.9824,local_loss: 0.17124596881200713,global_loss: 0.811194658279419\n",
      "Step 30: BSNE_Loss=0.4369,local_loss: 0.07303821746150237,global_loss: 0.3638306260108948\n",
      "Step 31: BSNE_Loss=0.6085,local_loss: 0.15872921402807572,global_loss: 0.4497712254524231\n",
      "Step 32: BSNE_Loss=0.7507,local_loss: 0.18020613988571577,global_loss: 0.5705112218856812\n",
      "Step 33: BSNE_Loss=0.8393,local_loss: 0.1398620970818711,global_loss: 0.6994580626487732\n",
      "Step 34: BSNE_Loss=0.7886,local_loss: 0.10252944390946389,global_loss: 0.6861118078231812\n",
      "Step 35: BSNE_Loss=0.9503,local_loss: 0.28542433723995664,global_loss: 0.6648345589637756\n",
      "Step 36: BSNE_Loss=0.8575,local_loss: 0.150168352897225,global_loss: 0.7073757648468018\n",
      "Step 37: BSNE_Loss=0.7093,local_loss: 0.1499562880462108,global_loss: 0.5593576431274414\n",
      "Step 38: BSNE_Loss=0.9399,local_loss: 0.06902729859729222,global_loss: 0.8709142804145813\n",
      "Step 39: BSNE_Loss=0.9757,local_loss: 0.2072231982038123,global_loss: 0.7685238122940063\n",
      "Step 40: BSNE_Loss=0.7310,local_loss: 0.2528055210208598,global_loss: 0.47819986939430237\n",
      "Step 41: BSNE_Loss=1.0766,local_loss: 0.12058897598039878,global_loss: 0.9559640884399414\n",
      "Step 42: BSNE_Loss=0.9946,local_loss: 0.20176993264846732,global_loss: 0.792809009552002\n",
      "Step 43: BSNE_Loss=1.0030,local_loss: 0.14644023847429843,global_loss: 0.8565673828125\n",
      "Step 44: BSNE_Loss=1.1684,local_loss: 0.12061529419817887,global_loss: 1.0477873086929321\n",
      "Step 45: BSNE_Loss=0.9207,local_loss: 0.27196267740217045,global_loss: 0.6487837433815002\n",
      "Step 46: BSNE_Loss=0.7450,local_loss: 0.11976193668091402,global_loss: 0.6252686381340027\n",
      "Step 47: BSNE_Loss=1.2283,local_loss: 0.12954067291607368,global_loss: 1.0987389087677002\n",
      "Step 48: BSNE_Loss=0.7535,local_loss: 0.19818175764616616,global_loss: 0.5553085207939148\n",
      "Step 49: BSNE_Loss=0.7652,local_loss: 0.1890421584234238,global_loss: 0.5761435031890869\n",
      "Step 50: BSNE_Loss=1.2400,local_loss: 0.18277611420285678,global_loss: 1.0572378635406494\n",
      "Step 51: BSNE_Loss=0.8926,local_loss: 0.17251225017315996,global_loss: 0.7200636863708496\n",
      "Step 52: BSNE_Loss=0.5068,local_loss: 0.2376377299642039,global_loss: 0.2691247761249542\n",
      "Step 53: BSNE_Loss=0.8338,local_loss: 0.21056403657276407,global_loss: 0.6232514977455139\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 54: BSNE_Loss=0.4077,local_loss: 0.16704189603526295,global_loss: 0.24065330624580383\n",
      "Step 55: BSNE_Loss=0.8186,local_loss: 0.13353661561953936,global_loss: 0.6850994825363159\n",
      "Step 56: BSNE_Loss=0.7478,local_loss: 0.1051563824904731,global_loss: 0.642681360244751\n",
      "Step 57: BSNE_Loss=0.7121,local_loss: 0.17672744980158533,global_loss: 0.5353752374649048\n",
      "Step 58: BSNE_Loss=0.8470,local_loss: 0.1925635574626381,global_loss: 0.6544367671012878\n",
      "Step 59: BSNE_Loss=0.7963,local_loss: 0.2487436491312156,global_loss: 0.5475292801856995\n",
      "Step 60: BSNE_Loss=0.7087,local_loss: 0.17264896326347937,global_loss: 0.5360777378082275\n",
      "Step 61: BSNE_Loss=0.7431,local_loss: 0.20469367646721887,global_loss: 0.5383955240249634\n",
      "Step 62: BSNE_Loss=0.8013,local_loss: 0.18813794433240477,global_loss: 0.6131824254989624\n",
      "Step 63: BSNE_Loss=0.4419,local_loss: 0.07106698268329475,global_loss: 0.37088122963905334\n",
      "Step 64: BSNE_Loss=0.8593,local_loss: 0.17788014102753757,global_loss: 0.6814242005348206\n",
      "Step 65: BSNE_Loss=0.9863,local_loss: 0.22976605122810156,global_loss: 0.7565565705299377\n",
      "Step 66: BSNE_Loss=0.6064,local_loss: 0.30171326615404986,global_loss: 0.30470606684684753\n",
      "Step 67: BSNE_Loss=1.0210,local_loss: 0.14489218540957538,global_loss: 0.8761516809463501\n",
      "Step 68: BSNE_Loss=0.6249,local_loss: 0.05852638368014929,global_loss: 0.5663847923278809\n",
      "Step 69: BSNE_Loss=0.5799,local_loss: 0.23742050617231097,global_loss: 0.3424435257911682\n",
      "Step 70: BSNE_Loss=0.8559,local_loss: 0.18339656087579098,global_loss: 0.6725521683692932\n",
      "Step 71: BSNE_Loss=0.5954,local_loss: 0.14688909794595342,global_loss: 0.44853007793426514\n",
      "Step 72: BSNE_Loss=0.9419,local_loss: 0.14726786035848388,global_loss: 0.794672966003418\n",
      "Step 73: BSNE_Loss=1.2680,local_loss: 0.22692335264392113,global_loss: 1.041032075881958\n",
      "Step 74: BSNE_Loss=0.6364,local_loss: 0.08974630775033504,global_loss: 0.5466648936271667\n",
      "Step 75: BSNE_Loss=0.8464,local_loss: 0.14365751707925323,global_loss: 0.7027111053466797\n",
      "Step 76: BSNE_Loss=0.8831,local_loss: 0.1466230059983869,global_loss: 0.7364444136619568\n",
      "Step 77: BSNE_Loss=0.5315,local_loss: 0.1830605244485961,global_loss: 0.3484233617782593\n",
      "Step 78: BSNE_Loss=0.4683,local_loss: 0.1272824996839305,global_loss: 0.34103143215179443\n",
      "Step 79: BSNE_Loss=0.4853,local_loss: 0.1672497445630053,global_loss: 0.318043053150177\n",
      "Step 80: BSNE_Loss=0.6718,local_loss: 0.2540519607595729,global_loss: 0.4177064895629883\n",
      "Step 81: BSNE_Loss=1.0661,local_loss: 0.18900081269470492,global_loss: 0.8770588636398315\n",
      "Step 82: BSNE_Loss=0.8932,local_loss: 0.17987442869629153,global_loss: 0.7133288383483887\n",
      "Step 83: BSNE_Loss=0.7578,local_loss: 0.20747921414527426,global_loss: 0.5503038763999939\n",
      "Step 84: BSNE_Loss=0.5302,local_loss: 0.12210218765140513,global_loss: 0.4080805480480194\n",
      "Step 85: BSNE_Loss=0.8437,local_loss: 0.08127658099444744,global_loss: 0.7623869776725769\n",
      "Step 86: BSNE_Loss=0.8924,local_loss: 0.16455014282166364,global_loss: 0.7278047204017639\n",
      "Step 87: BSNE_Loss=0.9033,local_loss: 0.3035320908010491,global_loss: 0.5997987389564514\n",
      "Step 88: BSNE_Loss=0.9987,local_loss: 0.12098593612720628,global_loss: 0.8776820302009583\n",
      "Step 89: BSNE_Loss=0.6886,local_loss: 0.18571151414537204,global_loss: 0.5028672814369202\n",
      "Step 90: BSNE_Loss=0.6914,local_loss: 0.21161132254589077,global_loss: 0.47979000210762024\n",
      "Step 91: BSNE_Loss=1.0200,local_loss: 0.39620933234288247,global_loss: 0.6237998008728027\n",
      "Step 92: BSNE_Loss=0.9825,local_loss: 0.3838997263032409,global_loss: 0.5986046195030212\n",
      "Step 93: BSNE_Loss=0.6321,local_loss: 0.154485066063132,global_loss: 0.47757434844970703\n",
      "Step 94: BSNE_Loss=0.7192,local_loss: 0.2830990444384793,global_loss: 0.43607524037361145\n",
      "Step 95: BSNE_Loss=0.7785,local_loss: 0.21075221654596596,global_loss: 0.5677338242530823\n",
      "Step 96: BSNE_Loss=0.8263,local_loss: 0.3436553444277318,global_loss: 0.48263466358184814\n",
      "Step 97: BSNE_Loss=1.0837,local_loss: 0.2876953445164308,global_loss: 0.79597008228302\n",
      "Step 98: BSNE_Loss=0.8719,local_loss: 0.3020354953195389,global_loss: 0.5698235034942627\n",
      "Step 99: BSNE_Loss=0.7080,local_loss: 0.1903094265600626,global_loss: 0.5177138447761536\n",
      "Step 100: BSNE_Loss=0.8086,local_loss: 0.37317239815504494,global_loss: 0.4354497790336609\n",
      "Step 101: BSNE_Loss=0.8699,local_loss: 0.2390560017782728,global_loss: 0.6308356523513794\n",
      "Step 102: BSNE_Loss=0.3986,local_loss: 0.2621828453365606,global_loss: 0.1363973617553711\n",
      "Step 103: BSNE_Loss=0.5773,local_loss: 0.2998609187680099,global_loss: 0.2774014472961426\n",
      "Step 104: BSNE_Loss=0.9826,local_loss: 0.3839694200134356,global_loss: 0.5985813140869141\n",
      "Step 105: BSNE_Loss=0.4916,local_loss: 0.10132193583322441,global_loss: 0.39024606347084045\n",
      "Step 106: BSNE_Loss=0.4617,local_loss: 0.14059915156983446,global_loss: 0.3210977017879486\n",
      "Step 107: BSNE_Loss=0.6868,local_loss: 0.12863564153521304,global_loss: 0.5581948161125183\n",
      "Step 108: BSNE_Loss=0.3836,local_loss: 0.26585862271296357,global_loss: 0.11773863434791565\n",
      "Step 109: BSNE_Loss=0.6651,local_loss: 0.3011328147703469,global_loss: 0.3639451265335083\n",
      "Step 110: BSNE_Loss=0.4352,local_loss: 0.1427978912869952,global_loss: 0.29243138432502747\n",
      "Step 111: BSNE_Loss=0.2524,local_loss: 0.20726390307411646,global_loss: 0.04517393186688423\n",
      "Step 112: BSNE_Loss=0.5530,local_loss: 0.11411507140735241,global_loss: 0.43892285227775574\n",
      "Step 113: BSNE_Loss=1.1130,local_loss: 0.10576747272461595,global_loss: 1.007213830947876\n",
      "Step 114: BSNE_Loss=0.1341,local_loss: 0.11713631113788163,global_loss: 0.01696409098803997\n",
      "Center node 13 finished at step 115 with loss 0.1341\n",
      "\n",
      "=== Pretraining Epoch 14 (Center Node: 14) ===\n",
      "Step 0: BSNE_Loss=0.7626,local_loss: 0.15363422430184742,global_loss: 0.6089168787002563\n",
      "Step 1: BSNE_Loss=0.7933,local_loss: 0.23410075382407172,global_loss: 0.5592018961906433\n",
      "Step 2: BSNE_Loss=0.5490,local_loss: 0.11721053969664168,global_loss: 0.43175381422042847\n",
      "Step 3: BSNE_Loss=0.8893,local_loss: 0.3740983739648222,global_loss: 0.5151685476303101\n",
      "Step 4: BSNE_Loss=0.9830,local_loss: 0.5150450103505753,global_loss: 0.46795183420181274\n",
      "Step 5: BSNE_Loss=0.4033,local_loss: 0.30636066028524306,global_loss: 0.0969100072979927\n",
      "Step 6: BSNE_Loss=0.1807,local_loss: 0.21847181924344775,global_loss: -0.03774501010775566\n",
      "Center node 14 finished at step 7 with loss 0.1807\n",
      "\n",
      "=== Pretraining Epoch 15 (Center Node: 15) ===\n",
      "Step 0: BSNE_Loss=0.5606,local_loss: 0.23059499378369716,global_loss: 0.3299962878227234\n",
      "Step 1: BSNE_Loss=0.5325,local_loss: 0.21170992148553872,global_loss: 0.3208097219467163\n",
      "Step 2: BSNE_Loss=0.8704,local_loss: 0.26414007101761156,global_loss: 0.6062180995941162\n",
      "Step 3: BSNE_Loss=0.3551,local_loss: 0.15252242988952788,global_loss: 0.20259340107440948\n",
      "Step 4: BSNE_Loss=0.4245,local_loss: 0.16565637197208305,global_loss: 0.2588522136211395\n",
      "Step 5: BSNE_Loss=0.6722,local_loss: 0.16862620359199018,global_loss: 0.5035489201545715\n",
      "Step 6: BSNE_Loss=0.3897,local_loss: 0.29567815652513657,global_loss: 0.09406650811433792\n",
      "Step 7: BSNE_Loss=0.3443,local_loss: 0.19491762700768767,global_loss: 0.1493474245071411\n",
      "Step 8: BSNE_Loss=0.0262,local_loss: 0.15451326286606168,global_loss: -0.12829826772212982\n",
      "Center node 15 finished at step 9 with loss 0.0262\n",
      "\n",
      "=== Pretraining Epoch 16 (Center Node: 16) ===\n",
      "Step 0: BSNE_Loss=1.1268,local_loss: 0.2655594075732158,global_loss: 0.8612750768661499\n",
      "Step 1: BSNE_Loss=0.5585,local_loss: 0.17097375720256885,global_loss: 0.38748785853385925\n",
      "Step 2: BSNE_Loss=0.6312,local_loss: 0.04489597861661217,global_loss: 0.5863227844238281\n",
      "Step 3: BSNE_Loss=0.7996,local_loss: 0.190013351922385,global_loss: 0.6095888614654541\n",
      "Step 4: BSNE_Loss=0.8331,local_loss: 0.2244801825944205,global_loss: 0.6086332201957703\n",
      "Step 5: BSNE_Loss=0.7336,local_loss: -0.013070993804840505,global_loss: 0.7466692328453064\n",
      "Step 6: BSNE_Loss=0.5259,local_loss: 0.17902160953501395,global_loss: 0.3468301296234131\n",
      "Step 7: BSNE_Loss=0.7018,local_loss: 0.11099474806911251,global_loss: 0.5908267498016357\n",
      "Step 8: BSNE_Loss=0.9087,local_loss: 0.06764663127533864,global_loss: 0.8410330414772034\n",
      "Step 9: BSNE_Loss=0.6588,local_loss: 0.10328831951358393,global_loss: 0.5555239915847778\n",
      "Step 10: BSNE_Loss=0.8677,local_loss: 0.15929024968137057,global_loss: 0.7084184885025024\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 11: BSNE_Loss=0.6818,local_loss: 0.11548791177281525,global_loss: 0.5663377046585083\n",
      "Step 12: BSNE_Loss=0.6822,local_loss: 0.06736925334456131,global_loss: 0.6148291826248169\n",
      "Step 13: BSNE_Loss=0.2284,local_loss: 0.21618084329905066,global_loss: 0.012210735119879246\n",
      "Step 14: BSNE_Loss=0.3781,local_loss: 0.028539149350604493,global_loss: 0.3495883345603943\n",
      "Step 15: BSNE_Loss=0.3121,local_loss: 0.1383130834702597,global_loss: 0.17374326288700104\n",
      "Step 16: BSNE_Loss=0.6685,local_loss: 0.0829761770979688,global_loss: 0.5855732560157776\n",
      "Step 17: BSNE_Loss=0.6393,local_loss: 0.09707848164185422,global_loss: 0.5421942472457886\n",
      "Step 18: BSNE_Loss=0.4189,local_loss: 0.08990985209078402,global_loss: 0.3289874792098999\n",
      "Step 19: BSNE_Loss=0.6158,local_loss: 0.08819167734844596,global_loss: 0.5276058912277222\n",
      "Step 20: BSNE_Loss=0.5408,local_loss: 0.03233145690561007,global_loss: 0.5084735751152039\n",
      "Step 21: BSNE_Loss=1.0454,local_loss: 0.07904808973740408,global_loss: 0.9663983583450317\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[31m---------------------------------------------------------------------------\u001b[39m",
      "\u001b[31mKeyboardInterrupt\u001b[39m                         Traceback (most recent call last)",
      "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[5]\u001b[39m\u001b[32m, line 155\u001b[39m\n\u001b[32m    153\u001b[39m bu_node_features = []\n\u001b[32m    154\u001b[39m \u001b[38;5;28;01mfor\u001b[39;00m node_idx \u001b[38;5;129;01min\u001b[39;00m bu_nodes:\n\u001b[32m--> \u001b[39m\u001b[32m155\u001b[39m     node_subgraph = \u001b[43mcreate_node_subgraph\u001b[49m\u001b[43m(\u001b[49m\u001b[43mnode_idx\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mfeat_data\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43medge_indexs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdevice\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m    156\u001b[39m     _, node_feature = bsne_model([node_subgraph])\n\u001b[32m    157\u001b[39m     bu_node_features.append(node_feature.squeeze(\u001b[32m0\u001b[39m))\n",
      "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[1]\u001b[39m\u001b[32m, line 70\u001b[39m, in \u001b[36mcreate_node_subgraph\u001b[39m\u001b[34m(node_idx, feat_data, edge_indexs, device)\u001b[39m\n\u001b[32m     68\u001b[39m sub_edge_index = []\n\u001b[32m     69\u001b[39m \u001b[38;5;28;01mfor\u001b[39;00m rel_idx \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mrange\u001b[39m(\u001b[38;5;28mlen\u001b[39m(edge_indexs)):\n\u001b[32m---> \u001b[39m\u001b[32m70\u001b[39m     edge_index = \u001b[43medge_indexs\u001b[49m\u001b[43m[\u001b[49m\u001b[43mrel_idx\u001b[49m\u001b[43m]\u001b[49m\u001b[43m[\u001b[49m\u001b[32;43m0\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m.\u001b[49m\u001b[43mcpu\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m.numpy()\n\u001b[32m     71\u001b[39m     mask = np.isin(edge_index[\u001b[32m0\u001b[39m], sub_nodes) & np.isin(edge_index[\u001b[32m1\u001b[39m], sub_nodes)\n\u001b[32m     72\u001b[39m     local_edges = edge_index[:, mask]\n",
      "\u001b[31mKeyboardInterrupt\u001b[39m: "
     ]
    }
   ],
   "source": [
    "# Full end-to-end test (new): repeatedly train on one subgraph; a squared term is added to the loss\n",
    "\n",
    "# Hyperparameter settings\n",
    "args = {\n",
    "    \"dataset\": \"amazon\",\n",
    "#     \"dataset\": \"yelp\",\n",
    "    \"batch_size\": 8,\n",
    "    \"sample_size\": 50,\n",
    "    \"weight_decay\": 0.00005,\n",
    "    \"emb_size\": 32,\n",
    "    \"pretrain_epochs\": 200,\n",
    "    \"pretrain_lr\": 0.001, #0.0001\n",
    "    \"finetune_lr\": 0.0005,\n",
    "    \"num_epochs\": 200,\n",
    "    \"pretrain_patience\": 20,\n",
    "    \"patience\": 30,\n",
    "    \"tsne_weight\": 0.3,\n",
    "    \"weight\": 0.6,\n",
    "    \"layers\": 7,\n",
    "    \"test_size\": 0.6,\n",
    "    \"val_size\": 0.5,\n",
    "    \"layers_tree\": 7,\n",
    "    \"seed\": 76,\n",
    "    \"num_heads\": 2,\n",
    "    \"drop_rate\": 0.5\n",
    "}\n",
    "\n",
    "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "print(device)\n",
    "\n",
    "timestamp = time.strftime(\"%Y%m%d-%H%M%S\")\n",
    "writer = SummaryWriter(f'runs/{args[\"dataset\"]}_{timestamp}')\n",
    "\n",
    "print('loading data...')\n",
    "prefix = \"../../data/\"\n",
    "\n",
    "edge_indexs, feat_data, labels = load_data(args['dataset'], args['layers_tree'], prefix)\n",
    "\n",
    "np.random.seed(args['seed'])\n",
    "rd.seed(args['seed'])\n",
    "\n",
    "if args['dataset'] == 'yelp':\n",
    "    index = list(range(len(labels)))\n",
    "    idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels, stratify=labels,\n",
    "                                                                    test_size=args['test_size'], random_state=2,\n",
    "                                                                    shuffle=True)\n",
    "    idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,\n",
    "                                                          stratify=y_train_val, test_size=args['val_size'],\n",
    "                                                          random_state=2, shuffle=True)\n",
    "    dist_path = os.path.join(prefix, \"YelpChi_shortest_distance.pkl\")\n",
    "elif args['dataset'] == 'amazon':\n",
    "    index = list(range(3305, len(labels)))\n",
    "    idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels[3305:],\n",
    "                                                                    stratify=labels[3305:],\n",
    "                                                                    test_size=args['test_size'],\n",
    "                                                                    random_state=2, shuffle=True)\n",
    "    idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,\n",
    "                                                          stratify=y_train_val, test_size=args['val_size'],\n",
    "                                                          random_state=2, shuffle=True)\n",
    "    dist_path = os.path.join(prefix, \"Amazon_shortest_distance.pkl\")\n",
    "\n",
    "with open(dist_path, 'rb') as f:\n",
    "    dist_data = pickle.load(f)\n",
    "    dist_matrix = torch.tensor(dist_data['dist_matrix']).to(device)\n",
    "\n",
    "# Prepare the feature data\n",
    "feat_data = torch.tensor(feat_data).float()\n",
    "# Min-max normalize the features\n",
    "scaler = MinMaxScaler()\n",
    "feat_data = torch.tensor(scaler.fit_transform(feat_data)).float().to(device)\n",
    "\n",
    "# Initialize the BSNE transformer model\n",
    "bsne_model = BSNE_Transformer(\n",
    "    in_feat=feat_data.shape[1],\n",
    "    out_feat=2,\n",
    "    relation_nums=len(edge_indexs),\n",
    "    d_model=64,\n",
    "    nhead=args['num_heads'],\n",
    "    num_layers=3,\n",
    "    dim_feedforward=256,\n",
    "    drop_rate=args['drop_rate']\n",
    ").to(device)\n",
    "\n",
    "# Move the edge index tensors to the device\n",
    "for edge_index in edge_indexs:\n",
    "    edge_index[0] = edge_index[0].to(device)\n",
    "    edge_index[1] = [tensor.to(device) for tensor in edge_index[1]]\n",
    "\n",
    "all_local_losses = []      # per-epoch list of batch-level local losses\n",
    "all_global_losses = []     # per-epoch list of batch-level global losses\n",
    "all_bsne_losses = []       # total BSNE loss per epoch\n",
    "epoch_avg_local = []       # average local loss per epoch\n",
    "epoch_avg_global = []      # average global loss per epoch\n",
    "    \n",
    "    \n",
    "print(\"\\n=== Starting Pretraining ===\")\n",
    "\n",
    "bsne_model.classifier.requires_grad_(False)\n",
    "optimizer = torch.optim.AdamW(\n",
    "    filter(lambda p: p.requires_grad, bsne_model.parameters()),\n",
    "    lr=args['pretrain_lr'],\n",
    "    weight_decay=5e-5\n",
    ")\n",
    "pretrain_best_loss = float('inf')\n",
    "pretrain_no_improve = 0\n",
    "pretrain_early_stop = False\n",
    "\n",
    "center_indices = list(range(feat_data.shape[0]))\n",
    "loss_threshold = 0.2\n",
    "sample_size = 50\n",
    "max_steps_per_center = 1000  # guard against an infinite training loop\n",
    "\n",
    "for epoch, center_idx in enumerate(center_indices):\n",
    "    print(f\"\\n=== Pretraining Epoch {epoch} (Center Node: {center_idx}) ===\")\n",
    "    step = 0\n",
    "    \n",
    "    # Build the Bp subgraph: neighbors drawn with probability proportional to 10^(-distance)\n",
    "    dist_row = dist_matrix[center_idx].cpu().numpy()\n",
    "    probs = np.power(10.0, -dist_row)\n",
    "    probs[center_idx] = 0\n",
    "    probs /= probs.sum()\n",
    "    available_nodes = len(dist_row) - 1\n",
    "    actual_sample_size = min(sample_size, available_nodes)\n",
    "    if actual_sample_size > 0:\n",
    "        neighbors = np.random.choice(len(dist_row), size=actual_sample_size, p=probs, replace=False)\n",
    "        bp_nodes = [center_idx] + neighbors.tolist()\n",
    "    else:\n",
    "        bp_nodes = [center_idx]\n",
    "\n",
    "    # Build the Bu subgraph: neighbors drawn uniformly at random\n",
    "    # NOTE(review): unlike the Bp draw above, the uniform draw below does not zero out\n",
    "    # center_idx, so the center node can be re-sampled into bu_nodes -- confirm intended\n",
    "    if actual_sample_size > 0:\n",
    "        neighbors = np.random.choice(len(dist_row), size=actual_sample_size, replace=False)\n",
    "        bu_nodes = [center_idx] + neighbors.tolist()\n",
    "    else:\n",
    "        bu_nodes = [center_idx]\n",
    "        \n",
    "    while True:\n",
    "        bsne_model.train()\n",
    "        optimizer.zero_grad()\n",
    "        \n",
    "        total_loss = 0.0  # reset at every step\n",
    "        eps = 1e-10\n",
    "        \n",
    "        # Compute embeddings for all nodes in the Bp subgraph\n",
    "        bp_node_features = []\n",
    "        for node_idx in bp_nodes:\n",
    "            node_subgraph = create_node_subgraph(node_idx, feat_data, edge_indexs, device)\n",
    "            _, node_feature = bsne_model([node_subgraph])\n",
    "            bp_node_features.append(node_feature.squeeze(0))\n",
    "        bp_features = torch.stack(bp_node_features)\n",
    "        \n",
    "        # Compute embeddings for all nodes in the Bu subgraph\n",
    "        bu_node_features = []\n",
    "        for node_idx in bu_nodes:\n",
    "            node_subgraph = create_node_subgraph(node_idx, feat_data, edge_indexs, device)\n",
    "            _, node_feature = bsne_model([node_subgraph])\n",
    "            bu_node_features.append(node_feature.squeeze(0))\n",
    "        bu_features = torch.stack(bu_node_features)\n",
    "        \n",
    "        # Compute the loss\n",
    "        center_feature = bp_features[0]  # the center node comes first\n",
    "\n",
    "        # Only the other Bp nodes (the center node is excluded)\n",
    "        other_bp_indices = bp_nodes[1:]\n",
    "        other_bp_features = bp_features[1:]\n",
    "        \n",
    "        with torch.no_grad():\n",
    "            # Path distances from the center node to the other Bp nodes\n",
    "            orig_dists_bp = dist_matrix[center_idx][other_bp_indices].cpu().numpy()\n",
    "\n",
    "            # P vector (based on node path distances)\n",
    "            P = np.exp(-orig_dists_bp)\n",
    "            P_sum = P.sum()\n",
    "            P = P / (P_sum + eps)\n",
    "            P = torch.tensor(P, device=device)\n",
    "\n",
    "        # Q vector (based on feature distances)\n",
    "        # Feature-space distances from the center node to the other Bp nodes\n",
    "        feat_dists_bp = torch.cdist(center_feature.unsqueeze(0), other_bp_features).squeeze(0)\n",
    "        feat_dists_bp.requires_grad_(True)\n",
    "        # NOTE(review): feat_dists_bp is a non-leaf tensor already connected to the model's\n",
    "        # autograd graph, so the requires_grad_ call above looks redundant -- confirm\n",
    "\n",
    "        \n",
    "        Q = torch.softmax(-feat_dists_bp, dim=0)\n",
    "        \n",
    "#         log_ratio = torch.log(P + eps) - torch.log(Q + eps)\n",
    "        log_ratio = torch.log(((P + eps)/(Q + eps))**2)\n",
    "        loss_local = log_ratio.mean() \n",
    "\n",
    "        # Global distances and their unnormalized probabilities\n",
    "        all_dists = dist_matrix[center_idx].cpu().numpy()\n",
    "        unnorm_probs_global = np.exp(-all_dists)\n",
    "        unnorm_probs_global[center_idx] = 0  # exclude self\n",
    "\n",
    "        # Global normalization constant\n",
    "        Z_global = unnorm_probs_global.sum() + eps\n",
    "\n",
    "        # Global probability mass of the B_p nodes\n",
    "        bp_global_prob_sum = unnorm_probs_global[other_bp_indices].sum() / Z_global\n",
    "\n",
    "        # Compute k_Bp\n",
    "        N = dist_matrix.shape[0]\n",
    "        k_Bp = bp_global_prob_sum * (N / len(other_bp_indices))\n",
    "\n",
    "        bu_features_ = bu_features[1:]  # exclude the center node\n",
    "        feat_dists_bu = torch.cdist(center_feature.unsqueeze(0), bu_features_).squeeze(0)\n",
    "\n",
    "        sum_e_bu = torch.exp(-feat_dists_bu).sum()\n",
    "        sum_e_bp = torch.exp(-feat_dists_bp).sum()\n",
    "        global_ratio = k_Bp * (sum_e_bu / (sum_e_bp + eps))\n",
    "        \n",
    "        loss_global = torch.log(global_ratio.clamp(min=eps, max=1e10)**2)\n",
    "\n",
    "        # = 5. Combine the losses =\n",
    "        total_loss += loss_local + loss_global\n",
    "        \n",
    "        total_loss.backward()\n",
    "        optimizer.step()\n",
    "        \n",
    "        print(f\"Step {step}: BSNE_Loss={total_loss.item():.4f},local_loss: {loss_local.item()},global_loss: {loss_global.item()}\")\n",
    "        step += 1\n",
    "        \n",
    "        if total_loss.item() < loss_threshold or step >= max_steps_per_center:\n",
    "            print(f\"Center node {center_idx} finished at step {step} with loss {total_loss.item():.4f}\")\n",
    "            break\n",
    "\n",
    "print(\"\\n=== Starting Fine-tuning ===\")\n",
    "bsne_model.classifier.requires_grad_(True)\n",
    "optimizer = torch.optim.AdamW(\n",
    "    bsne_model.parameters(),\n",
    "    lr=args['finetune_lr'],\n",
    "    weight_decay=5e-5\n",
    ")\n",
    "scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(\n",
    "    optimizer, mode='max', factor=0.5, patience=10, verbose=True\n",
    ")\n",
    "\n",
    "best_val_auc = 0.0\n",
    "best_model_state = None\n",
    "train_pos, train_neg = pos_neg_split(idx_train, y_train)\n",
    "\n",
    "no_improve_epochs = 0\n",
    "early_stop = False\n",
    "\n",
    "for epoch in range(args['num_epochs']):\n",
    "    if early_stop:\n",
    "        break\n",
    "\n",
    "    bsne_model.train()\n",
    "    total_loss = 0.0\n",
    "\n",
    "    # Sample center nodes for this batch\n",
    "    batch_centers = rd.sample(train_pos + train_neg, args['batch_size'])\n",
    "    subgraph_data = []\n",
    "\n",
    "    for xi in batch_centers:\n",
    "        subgraph = create_node_subgraph(xi, feat_data, edge_indexs, device)\n",
    "        #         print(len(subgraph['features']))\n",
    "        subgraph_data.append(subgraph)\n",
    "\n",
    "    optimizer.zero_grad()\n",
    "\n",
    "    center_logits, _ = bsne_model(subgraph_data)  # [B, 2]\n",
    "\n",
    "    # Extract the center-node labels\n",
    "    labels_center = torch.tensor([labels[xi] for xi in batch_centers]).to(device).long()\n",
    "\n",
    "    # Classification loss\n",
    "    cls_loss = F.nll_loss(center_logits, labels_center)\n",
    "\n",
    "    cls_loss.backward()\n",
    "    optimizer.step()\n",
    "\n",
    "    total_loss += cls_loss.item()\n",
    "\n",
    "    # NOTE(review): cls_loss is already a per-batch mean (F.nll_loss default), so dividing\n",
    "    # by batch_size again rescales the logged value -- confirm this is intended\n",
    "    avg_loss = total_loss / args['batch_size']\n",
    "    writer.add_scalar('FineTune/Train_Loss', avg_loss, epoch)\n",
    "\n",
    "    if epoch % 5 == 0:\n",
    "        val_auc, val_ap, val_f1, val_g_mean = test(idx_val, y_val, bsne_model, feat_data, edge_indexs, device)\n",
    "\n",
    "        writer.add_scalar('Validation/AUC', val_auc, epoch)\n",
    "        writer.add_scalar('Validation/F1', val_f1, epoch)\n",
    "        writer.add_scalar('Validation/GMean', val_g_mean, epoch)\n",
    "\n",
    "        print(f'Epoch: {epoch:03d} | Loss: {avg_loss:.4f} | Val AUC: {val_auc:.4f} | Val F1: {val_f1:.4f}')\n",
    "\n",
    "        scheduler.step(val_auc)\n",
    "\n",
    "        if val_auc > best_val_auc:\n",
    "            best_val_auc = val_auc\n",
    "            no_improve_epochs = 0\n",
    "            best_model_state = copy.deepcopy(bsne_model.state_dict())\n",
    "        else:\n",
    "            no_improve_epochs += 1\n",
    "\n",
    "        if no_improve_epochs >= args['patience']:\n",
    "            print(f\"Early stopping at epoch {epoch}\")\n",
    "            early_stop = True\n",
    "\n",
    "# Load the best model checkpoint\n",
    "bsne_model.load_state_dict(best_model_state)\n",
    "\n",
    "# Final test evaluation\n",
    "test_auc, test_ap, test_f1, test_g_mean = test(idx_test, y_test, bsne_model, feat_data, edge_indexs, device)\n",
    "print(f'\\n=== Final Test Results ===')\n",
    "print(f'Test AUC: {test_auc:.4f} | Test AP: {test_ap:.4f} | Test F1: {test_f1:.4f} | G-mean: {test_g_mean:.4f}')\n",
    "writer.close()\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c337c395",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "37cc12a8",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "bbe64881",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "cuda\n",
      "loading data...\n",
      "\n",
      "=== Starting Pretraining ===\n",
      "\n",
      "=== Pretraining Epoch 0 (Center Node: 0) ===\n",
      "Step 0: BSNE_Loss=0.2267,local_loss: 0.13413366277376293,global_loss: 0.09255734086036682\n",
      "Step 1: BSNE_Loss=0.6362,local_loss: 0.1762406213596631,global_loss: 0.4599322974681854\n",
      "Step 2: BSNE_Loss=0.4342,local_loss: 0.17411853708324238,global_loss: 0.26008492708206177\n",
      "Step 3: BSNE_Loss=0.3888,local_loss: 0.1543814537956508,global_loss: 0.23446136713027954\n",
      "Step 4: BSNE_Loss=0.3469,local_loss: 0.15120244555936854,global_loss: 0.19569408893585205\n",
      "Step 5: BSNE_Loss=0.0123,local_loss: 0.25786517966378425,global_loss: -0.24560408294200897\n",
      "Center node 0 finished at step 6 with loss 0.0123\n",
      "\n",
      "=== Pretraining Epoch 1 (Center Node: 1) ===\n",
      "Step 0: BSNE_Loss=0.5244,local_loss: 0.11035573198791329,global_loss: 0.4140417277812958\n",
      "Step 1: BSNE_Loss=0.7616,local_loss: 0.02174652504898707,global_loss: 0.7398914098739624\n",
      "Step 2: BSNE_Loss=0.4602,local_loss: 0.23662366049376451,global_loss: 0.22355450689792633\n",
      "Step 3: BSNE_Loss=0.4792,local_loss: 0.14329123815542294,global_loss: 0.33588698506355286\n",
      "Step 4: BSNE_Loss=0.3660,local_loss: 0.13155968425520947,global_loss: 0.2344772070646286\n",
      "Step 5: BSNE_Loss=0.2463,local_loss: 0.19561564859611635,global_loss: 0.05069950222969055\n",
      "Step 6: BSNE_Loss=0.6022,local_loss: 0.16551111683324693,global_loss: 0.4366995096206665\n",
      "Step 7: BSNE_Loss=0.5192,local_loss: 0.1714475782398963,global_loss: 0.34776580333709717\n",
      "Step 8: BSNE_Loss=0.5879,local_loss: 0.21790648305679994,global_loss: 0.369976669549942\n",
      "Step 9: BSNE_Loss=0.6026,local_loss: 0.11307957041942483,global_loss: 0.48950260877609253\n",
      "Step 10: BSNE_Loss=0.4827,local_loss: 0.09565695112446608,global_loss: 0.3870812654495239\n",
      "Step 11: BSNE_Loss=0.3357,local_loss: 0.1224876201900935,global_loss: 0.2132389098405838\n",
      "Step 12: BSNE_Loss=0.3507,local_loss: 0.12581668925932335,global_loss: 0.2248612642288208\n",
      "Step 13: BSNE_Loss=0.6624,local_loss: 0.23112841899304953,global_loss: 0.43123549222946167\n",
      "Step 14: BSNE_Loss=0.3940,local_loss: 0.12627721513977705,global_loss: 0.2677029073238373\n",
      "Step 15: BSNE_Loss=0.5422,local_loss: 0.20664384431303998,global_loss: 0.33560502529144287\n",
      "Step 16: BSNE_Loss=0.5394,local_loss: 0.1754996162156487,global_loss: 0.3639161288738251\n",
      "Step 17: BSNE_Loss=0.5436,local_loss: 0.15367361620461295,global_loss: 0.3899228572845459\n",
      "Step 18: BSNE_Loss=0.1870,local_loss: 0.10724829164189573,global_loss: 0.0797736644744873\n",
      "Center node 1 finished at step 19 with loss 0.1870\n",
      "\n",
      "=== Pretraining Epoch 2 (Center Node: 2) ===\n",
      "Step 0: BSNE_Loss=0.2816,local_loss: 0.17578784918252655,global_loss: 0.10583316534757614\n",
      "Step 1: BSNE_Loss=0.5906,local_loss: 0.12293513803083572,global_loss: 0.4676210284233093\n",
      "Step 2: BSNE_Loss=0.5445,local_loss: 0.1717286664197618,global_loss: 0.37275177240371704\n",
      "Step 3: BSNE_Loss=0.5791,local_loss: 0.25185408532711306,global_loss: 0.32719939947128296\n",
      "Step 4: BSNE_Loss=0.4105,local_loss: 0.2573578459006394,global_loss: 0.15316811203956604\n",
      "Step 5: BSNE_Loss=0.4324,local_loss: 0.09135577793119963,global_loss: 0.34101229906082153\n",
      "Step 6: BSNE_Loss=0.4110,local_loss: 0.13505499226080583,global_loss: 0.2759696841239929\n",
      "Step 7: BSNE_Loss=0.5254,local_loss: 0.06976242388549386,global_loss: 0.4556032419204712\n",
      "Step 8: BSNE_Loss=0.2165,local_loss: 0.1310224339830928,global_loss: 0.08550599217414856\n",
      "Step 9: BSNE_Loss=0.5466,local_loss: 0.2085364581622026,global_loss: 0.3381071984767914\n",
      "Step 10: BSNE_Loss=0.4294,local_loss: 0.056754667852892224,global_loss: 0.3726777136325836\n",
      "Step 11: BSNE_Loss=0.6887,local_loss: 0.11061137096583248,global_loss: 0.5780960321426392\n",
      "Step 12: BSNE_Loss=0.5984,local_loss: 0.1332844796851196,global_loss: 0.4651249051094055\n",
      "Step 13: BSNE_Loss=0.2931,local_loss: 0.16099855962582937,global_loss: 0.13213816285133362\n",
      "Step 14: BSNE_Loss=0.5646,local_loss: 0.13646749183025572,global_loss: 0.4281419813632965\n",
      "Step 15: BSNE_Loss=0.5664,local_loss: 0.09305602513718164,global_loss: 0.4733274579048157\n",
      "Step 16: BSNE_Loss=0.6337,local_loss: 0.10349595856762779,global_loss: 0.530171275138855\n",
      "Step 17: BSNE_Loss=0.7227,local_loss: 0.17914438733807658,global_loss: 0.5435230135917664\n",
      "Step 18: BSNE_Loss=0.3602,local_loss: 0.12023467288953833,global_loss: 0.23992086946964264\n",
      "Step 19: BSNE_Loss=0.4549,local_loss: 0.09694251024580997,global_loss: 0.35792264342308044\n",
      "Step 20: BSNE_Loss=0.6931,local_loss: 0.18428805014880986,global_loss: 0.5087641477584839\n",
      "Step 21: BSNE_Loss=0.6805,local_loss: 0.2146476133246717,global_loss: 0.46588605642318726\n",
      "Step 22: BSNE_Loss=0.5871,local_loss: 0.133697304229878,global_loss: 0.45341312885284424\n",
      "Step 23: BSNE_Loss=0.3428,local_loss: 0.14038921468208157,global_loss: 0.2023799866437912\n",
      "Step 24: BSNE_Loss=0.3224,local_loss: 0.11077067233981222,global_loss: 0.21161678433418274\n",
      "Step 25: BSNE_Loss=0.4269,local_loss: 0.12757808902510695,global_loss: 0.29933157563209534\n",
      "Step 26: BSNE_Loss=0.6086,local_loss: 0.17179157948685364,global_loss: 0.4367740750312805\n",
      "Step 27: BSNE_Loss=0.7969,local_loss: 0.30223549031985864,global_loss: 0.49461883306503296\n",
      "Step 28: BSNE_Loss=0.2436,local_loss: 0.121717229520254,global_loss: 0.12193042784929276\n",
      "Step 29: BSNE_Loss=0.5832,local_loss: 0.1242625421496625,global_loss: 0.4589873254299164\n",
      "Step 30: BSNE_Loss=0.1466,local_loss: 0.21103769067991215,global_loss: -0.06440272927284241\n",
      "Center node 2 finished at step 31 with loss 0.1466\n",
      "\n",
      "=== Pretraining Epoch 3 (Center Node: 3) ===\n",
      "Step 0: BSNE_Loss=0.6910,local_loss: 0.11492086327322606,global_loss: 0.5760933756828308\n",
      "Step 1: BSNE_Loss=0.3959,local_loss: 0.08487829376400012,global_loss: 0.31105729937553406\n",
      "Step 2: BSNE_Loss=0.6519,local_loss: 0.15778723257400576,global_loss: 0.49406784772872925\n",
      "Step 3: BSNE_Loss=0.6581,local_loss: 0.14835915326818552,global_loss: 0.5097876787185669\n",
      "Step 4: BSNE_Loss=0.7739,local_loss: 0.2001108036039941,global_loss: 0.5738065242767334\n",
      "Step 5: BSNE_Loss=0.6527,local_loss: 0.17526048754133647,global_loss: 0.477395236492157\n",
      "Step 6: BSNE_Loss=0.5843,local_loss: 0.0590602542735137,global_loss: 0.5252742767333984\n",
      "Step 7: BSNE_Loss=0.6060,local_loss: 0.13343655216587985,global_loss: 0.4725170135498047\n",
      "Step 8: BSNE_Loss=0.5342,local_loss: 0.11946249087142624,global_loss: 0.4147373139858246\n",
      "Step 9: BSNE_Loss=0.8070,local_loss: 0.1251801057752769,global_loss: 0.6818667650222778\n",
      "Step 10: BSNE_Loss=0.9162,local_loss: 0.2119961950760604,global_loss: 0.704225480556488\n",
      "Step 11: BSNE_Loss=0.3454,local_loss: 0.15289395216038812,global_loss: 0.1925257295370102\n",
      "Step 12: BSNE_Loss=0.5784,local_loss: 0.10634789779902733,global_loss: 0.4720752239227295\n",
      "Step 16: BSNE_Loss=0.5957,local_loss: 0.18162824466110603,global_loss: 0.41404834389686584\n",
      "Step 17: BSNE_Loss=0.4745,local_loss: 0.13425533484430127,global_loss: 0.3402576744556427\n",
      "Step 18: BSNE_Loss=0.8664,local_loss: 0.2617858192661792,global_loss: 0.6045773029327393\n",
      "Step 19: BSNE_Loss=0.6314,local_loss: 0.11471996109439504,global_loss: 0.5166605710983276\n",
      "Step 20: BSNE_Loss=0.8387,local_loss: 0.23777634427166341,global_loss: 0.6008870601654053\n",
      "Step 21: BSNE_Loss=0.2297,local_loss: 0.1451623384267341,global_loss: 0.08451375365257263\n",
      "Step 22: BSNE_Loss=0.3934,local_loss: 0.1703651598592005,global_loss: 0.2230357825756073\n",
      "Step 23: BSNE_Loss=0.3478,local_loss: 0.10617648852530794,global_loss: 0.2415812909603119\n",
      "Step 24: BSNE_Loss=0.6386,local_loss: 0.17779713306528475,global_loss: 0.46081024408340454\n",
      "Step 25: BSNE_Loss=0.5241,local_loss: 0.24855035748631873,global_loss: 0.27552559971809387\n",
      "Step 26: BSNE_Loss=0.6552,local_loss: 0.047639658277586774,global_loss: 0.6075900793075562\n",
      "Step 27: BSNE_Loss=0.8858,local_loss: 0.1381840486459669,global_loss: 0.7475718259811401\n",
      "Step 28: BSNE_Loss=0.5767,local_loss: 0.11267548451233918,global_loss: 0.4640558362007141\n",
      "Step 29: BSNE_Loss=0.4225,local_loss: 0.09056362644820552,global_loss: 0.3319750130176544\n",
      "Step 30: BSNE_Loss=0.5794,local_loss: 0.118591433157456,global_loss: 0.46082133054733276\n",
      "Step 31: BSNE_Loss=0.4877,local_loss: 0.15774709820738278,global_loss: 0.32991117238998413\n",
      "Step 32: BSNE_Loss=0.5204,local_loss: 0.09390613394587213,global_loss: 0.4264447093009949\n",
      "Step 33: BSNE_Loss=0.6141,local_loss: 0.14182192606716734,global_loss: 0.4722317159175873\n",
      "Step 34: BSNE_Loss=0.5780,local_loss: 0.11368648652425663,global_loss: 0.4643397927284241\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 35: BSNE_Loss=0.5074,local_loss: 0.09605763896283222,global_loss: 0.4113371670246124\n",
      "Step 36: BSNE_Loss=0.8042,local_loss: 0.10286647727356972,global_loss: 0.7013341188430786\n",
      "Step 37: BSNE_Loss=0.6241,local_loss: 0.31218662345934334,global_loss: 0.31195268034935\n",
      "Step 38: BSNE_Loss=0.5636,local_loss: 0.154470839774436,global_loss: 0.4091339111328125\n",
      "Step 39: BSNE_Loss=0.6163,local_loss: 0.23618512349203452,global_loss: 0.3801455795764923\n",
      "Step 40: BSNE_Loss=0.7260,local_loss: 0.13839531457223134,global_loss: 0.5876429080963135\n",
      "Step 41: BSNE_Loss=0.4998,local_loss: 0.13800837694635162,global_loss: 0.36181214451789856\n",
      "Step 42: BSNE_Loss=0.5913,local_loss: 0.09145024712981968,global_loss: 0.49983271956443787\n",
      "Step 43: BSNE_Loss=0.3556,local_loss: 0.10716135021064778,global_loss: 0.24844326078891754\n",
      "Step 44: BSNE_Loss=0.5914,local_loss: 0.08602418248025646,global_loss: 0.5053338408470154\n",
      "Step 45: BSNE_Loss=0.5943,local_loss: 0.03820413753035663,global_loss: 0.5561401844024658\n",
      "Step 46: BSNE_Loss=0.6728,local_loss: 0.1597207382010574,global_loss: 0.5130375623703003\n",
      "Step 47: BSNE_Loss=0.5952,local_loss: 0.05491537117160406,global_loss: 0.5402575731277466\n",
      "Step 48: BSNE_Loss=0.7021,local_loss: 0.16216114887306354,global_loss: 0.5399178862571716\n",
      "Step 49: BSNE_Loss=0.5809,local_loss: 0.05319501272843523,global_loss: 0.527664840221405\n",
      "Step 50: BSNE_Loss=0.3475,local_loss: 0.08742179261409959,global_loss: 0.26012131571769714\n",
      "Step 51: BSNE_Loss=0.4930,local_loss: 0.04666241237007698,global_loss: 0.4463859796524048\n",
      "Step 52: BSNE_Loss=0.6270,local_loss: 0.11339278165698854,global_loss: 0.5136361122131348\n",
      "Step 53: BSNE_Loss=0.4708,local_loss: 0.10779832990521858,global_loss: 0.362983763217926\n",
      "Step 54: BSNE_Loss=0.6180,local_loss: 0.06947078628886803,global_loss: 0.5485657453536987\n",
      "Step 55: BSNE_Loss=0.6130,local_loss: 0.0616319789917712,global_loss: 0.5513813495635986\n",
      "Step 56: BSNE_Loss=0.3030,local_loss: 0.12624561553247385,global_loss: 0.176798015832901\n",
      "Step 57: BSNE_Loss=0.7142,local_loss: 0.11899796079016133,global_loss: 0.5951805114746094\n",
      "Step 58: BSNE_Loss=0.5725,local_loss: 0.08642600608088491,global_loss: 0.4860801696777344\n",
      "Step 59: BSNE_Loss=0.5195,local_loss: 0.24197747860143343,global_loss: 0.277508407831192\n",
      "Step 60: BSNE_Loss=0.2920,local_loss: 0.1654132956248649,global_loss: 0.12660670280456543\n",
      "Step 61: BSNE_Loss=0.3595,local_loss: 0.16076161451253504,global_loss: 0.19873440265655518\n",
      "Step 62: BSNE_Loss=0.5325,local_loss: 0.07500012375401621,global_loss: 0.45745009183883667\n",
      "Step 63: BSNE_Loss=0.5936,local_loss: 0.2818076298256188,global_loss: 0.3117770850658417\n",
      "Step 64: BSNE_Loss=0.6959,local_loss: 0.20384776575718508,global_loss: 0.4920029938220978\n",
      "Step 65: BSNE_Loss=0.6395,local_loss: 0.09300281056380222,global_loss: 0.5464741587638855\n",
      "Step 66: BSNE_Loss=0.5201,local_loss: 0.09164307945152775,global_loss: 0.4284621775150299\n",
      "Step 67: BSNE_Loss=0.4371,local_loss: 0.03114990274951948,global_loss: 0.4059532582759857\n",
      "Step 68: BSNE_Loss=0.4153,local_loss: 0.03065758613716794,global_loss: 0.3846692740917206\n",
      "Step 69: BSNE_Loss=0.5605,local_loss: 0.14101395616200507,global_loss: 0.4195290803909302\n",
      "Step 70: BSNE_Loss=0.4936,local_loss: 0.10901230998064142,global_loss: 0.38455381989479065\n",
      "Step 71: BSNE_Loss=0.6010,local_loss: 0.13256420919145318,global_loss: 0.46847474575042725\n",
      "Step 72: BSNE_Loss=0.4214,local_loss: 0.06971640269752728,global_loss: 0.3517012298107147\n",
      "Step 73: BSNE_Loss=0.4451,local_loss: 0.07477299556590902,global_loss: 0.3703145682811737\n",
      "Step 74: BSNE_Loss=0.4985,local_loss: 0.16977997135111103,global_loss: 0.3287571966648102\n",
      "Step 75: BSNE_Loss=0.5119,local_loss: 0.0940431006747415,global_loss: 0.41783225536346436\n",
      "Step 76: BSNE_Loss=0.2670,local_loss: 0.11126258401751152,global_loss: 0.15568767488002777\n",
      "Step 77: BSNE_Loss=0.4734,local_loss: 0.15497962502975177,global_loss: 0.3184535503387451\n",
      "Step 78: BSNE_Loss=0.8622,local_loss: 0.27308240758698993,global_loss: 0.5891168117523193\n",
      "Step 79: BSNE_Loss=0.4339,local_loss: 0.1008913155259319,global_loss: 0.3329603672027588\n",
      "Step 80: BSNE_Loss=0.4016,local_loss: 0.22342799745139819,global_loss: 0.17819420993328094\n",
      "Step 81: BSNE_Loss=0.5542,local_loss: 0.07615208804529441,global_loss: 0.47801917791366577\n",
      "Step 82: BSNE_Loss=0.5834,local_loss: 0.05593733956471342,global_loss: 0.527452290058136\n",
      "Step 83: BSNE_Loss=0.5392,local_loss: 0.044387849685144476,global_loss: 0.49482131004333496\n",
      "Step 84: BSNE_Loss=0.5670,local_loss: 0.1193451870918272,global_loss: 0.447627991437912\n",
      "Step 85: BSNE_Loss=0.6490,local_loss: 0.1096568167094036,global_loss: 0.5393202304840088\n",
      "Step 86: BSNE_Loss=0.5310,local_loss: 0.08175391118025772,global_loss: 0.4492647051811218\n",
      "Step 87: BSNE_Loss=0.6498,local_loss: 0.15460998648637045,global_loss: 0.4952268600463867\n",
      "Step 88: BSNE_Loss=0.7440,local_loss: 0.08970776084543058,global_loss: 0.6542933583259583\n",
      "Step 89: BSNE_Loss=0.5022,local_loss: 0.10073361865997525,global_loss: 0.401511549949646\n",
      "Step 90: BSNE_Loss=0.3818,local_loss: 0.08666926335483051,global_loss: 0.295146107673645\n",
      "Step 91: BSNE_Loss=0.3501,local_loss: 0.09327582233788138,global_loss: 0.25678104162216187\n",
      "Step 92: BSNE_Loss=0.5049,local_loss: 0.12591545935644521,global_loss: 0.3789680302143097\n",
      "Step 93: BSNE_Loss=0.4817,local_loss: 0.10658677481940423,global_loss: 0.37512022256851196\n",
      "Step 94: BSNE_Loss=0.4187,local_loss: 0.08552942767883404,global_loss: 0.3331647515296936\n",
      "Step 95: BSNE_Loss=0.7434,local_loss: 0.05698224733294836,global_loss: 0.686431884765625\n",
      "Step 96: BSNE_Loss=0.2316,local_loss: 0.04907513859872431,global_loss: 0.1824931502342224\n",
      "Step 97: BSNE_Loss=0.5088,local_loss: 0.07817359527557499,global_loss: 0.4306519031524658\n",
      "Step 98: BSNE_Loss=0.3941,local_loss: 0.14195616383921528,global_loss: 0.25210803747177124\n",
      "Step 99: BSNE_Loss=0.3352,local_loss: 0.06798623268131579,global_loss: 0.2671663463115692\n",
      "Step 100: BSNE_Loss=0.6988,local_loss: 0.18667556960495424,global_loss: 0.5121281147003174\n",
      "Step 101: BSNE_Loss=0.4079,local_loss: 0.09275959111999212,global_loss: 0.31511053442955017\n",
      "Step 102: BSNE_Loss=0.5091,local_loss: 0.11387444480556451,global_loss: 0.3952750861644745\n",
      "Step 103: BSNE_Loss=0.5009,local_loss: 0.10848651346665016,global_loss: 0.3924572467803955\n",
      "Step 104: BSNE_Loss=0.3772,local_loss: 0.12780276932107787,global_loss: 0.2494312971830368\n",
      "Step 105: BSNE_Loss=0.4888,local_loss: 0.08440277021622676,global_loss: 0.4043987989425659\n",
      "Step 106: BSNE_Loss=0.7527,local_loss: -0.0030069678735214936,global_loss: 0.7557539343833923\n",
      "Step 107: BSNE_Loss=0.6543,local_loss: 0.11902845029624498,global_loss: 0.5352386832237244\n",
      "Step 108: BSNE_Loss=0.3903,local_loss: 0.10051908303282074,global_loss: 0.2897983491420746\n",
      "Step 109: BSNE_Loss=0.4277,local_loss: 0.09296329207713344,global_loss: 0.33477503061294556\n",
      "Step 110: BSNE_Loss=0.4574,local_loss: 0.1057517118731581,global_loss: 0.3516883850097656\n",
      "Step 111: BSNE_Loss=0.5168,local_loss: 0.11567864944731915,global_loss: 0.40112099051475525\n",
      "Step 112: BSNE_Loss=0.4459,local_loss: 0.11844208859516515,global_loss: 0.3274844288825989\n",
      "Step 113: BSNE_Loss=0.5340,local_loss: 0.08001479700503808,global_loss: 0.4539639353752136\n",
      "Step 114: BSNE_Loss=0.7433,local_loss: 0.20051610417499743,global_loss: 0.5427804589271545\n",
      "Step 115: BSNE_Loss=0.3310,local_loss: 0.14319755720985616,global_loss: 0.18783944845199585\n",
      "Step 116: BSNE_Loss=0.7899,local_loss: 0.0283451955321316,global_loss: 0.761561393737793\n",
      "Step 117: BSNE_Loss=0.6131,local_loss: 0.10502160910984439,global_loss: 0.5080440044403076\n",
      "Step 118: BSNE_Loss=0.4118,local_loss: 0.09414398416188702,global_loss: 0.3176134526729584\n",
      "Step 119: BSNE_Loss=0.5641,local_loss: 0.03774254745279893,global_loss: 0.5263440012931824\n",
      "Step 120: BSNE_Loss=0.6236,local_loss: 0.03866999923462912,global_loss: 0.5849696397781372\n",
      "Step 121: BSNE_Loss=0.3821,local_loss: 0.07150120973147052,global_loss: 0.3105792701244354\n",
      "Step 122: BSNE_Loss=0.3343,local_loss: 0.1268840544243631,global_loss: 0.2074587494134903\n",
      "Step 123: BSNE_Loss=0.7165,local_loss: 0.04684534709563425,global_loss: 0.6696314811706543\n",
      "Step 124: BSNE_Loss=0.6381,local_loss: 0.04653130833422442,global_loss: 0.5916162729263306\n",
      "Step 125: BSNE_Loss=0.2291,local_loss: 0.15531836645208183,global_loss: 0.07382825762033463\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 126: BSNE_Loss=0.2091,local_loss: 0.044254578399250386,global_loss: 0.1648917943239212\n",
      "Step 127: BSNE_Loss=0.3804,local_loss: 0.06525460003120451,global_loss: 0.3151783049106598\n",
      "Step 128: BSNE_Loss=0.6028,local_loss: 0.08193835185313735,global_loss: 0.5208796262741089\n",
      "Step 129: BSNE_Loss=0.2873,local_loss: 0.16107412211731137,global_loss: 0.12624479830265045\n",
      "Step 130: BSNE_Loss=0.3295,local_loss: 0.08833051123887815,global_loss: 0.2412068098783493\n",
      "Step 131: BSNE_Loss=0.3429,local_loss: 0.008555075841460478,global_loss: 0.33438149094581604\n",
      "Step 132: BSNE_Loss=0.5326,local_loss: 0.08202989022686108,global_loss: 0.45059508085250854\n",
      "Step 133: BSNE_Loss=0.7327,local_loss: 0.03785148821158279,global_loss: 0.6948791146278381\n",
      "Step 134: BSNE_Loss=0.4746,local_loss: 0.0892295510887444,global_loss: 0.3853324055671692\n",
      "Step 135: BSNE_Loss=0.3883,local_loss: 0.13611082357252507,global_loss: 0.252178817987442\n",
      "Step 136: BSNE_Loss=0.5274,local_loss: 0.07316406871917963,global_loss: 0.45425915718078613\n",
      "Step 137: BSNE_Loss=0.4094,local_loss: 0.0509886208032564,global_loss: 0.35843655467033386\n",
      "Step 138: BSNE_Loss=0.2539,local_loss: 0.03485709297610748,global_loss: 0.2190483957529068\n",
      "Step 139: BSNE_Loss=0.4685,local_loss: 0.1531210588214136,global_loss: 0.31536591053009033\n",
      "Step 140: BSNE_Loss=0.2651,local_loss: 0.11318447914246087,global_loss: 0.1519288420677185\n",
      "Step 141: BSNE_Loss=0.3699,local_loss: 0.09445744336148089,global_loss: 0.275442510843277\n",
      "Step 142: BSNE_Loss=0.4118,local_loss: 0.09336341393800428,global_loss: 0.3184112310409546\n",
      "Step 143: BSNE_Loss=0.3135,local_loss: 0.1268429068158456,global_loss: 0.1866394579410553\n",
      "Step 144: BSNE_Loss=0.4204,local_loss: 0.03728463857386743,global_loss: 0.38310784101486206\n",
      "Step 145: BSNE_Loss=0.1946,local_loss: 0.1897672593809785,global_loss: 0.004785394296050072\n",
      "Center node 3 finished at step 146 with loss 0.1946\n",
      "\n",
      "=== Pretraining Epoch 4 (Center Node: 4) ===\n",
      "Step 0: BSNE_Loss=0.4207,local_loss: 0.17038577271206565,global_loss: 0.2503451406955719\n",
      "Step 1: BSNE_Loss=0.4351,local_loss: 0.09704907602071383,global_loss: 0.3380720019340515\n",
      "Step 2: BSNE_Loss=0.5149,local_loss: 0.04059652240297008,global_loss: 0.4743437170982361\n",
      "Step 3: BSNE_Loss=0.3954,local_loss: 0.14413498997082877,global_loss: 0.251248300075531\n",
      "Step 4: BSNE_Loss=0.4639,local_loss: 0.14573733518495038,global_loss: 0.31814390420913696\n",
      "Step 5: BSNE_Loss=0.5964,local_loss: 0.1751549193756828,global_loss: 0.42124634981155396\n",
      "Step 6: BSNE_Loss=0.5974,local_loss: 0.12837002067280526,global_loss: 0.4690743684768677\n",
      "Step 7: BSNE_Loss=0.3077,local_loss: 0.09074462981953449,global_loss: 0.21696721017360687\n",
      "Step 8: BSNE_Loss=0.6730,local_loss: 0.15601580581844973,global_loss: 0.5169430375099182\n",
      "Step 9: BSNE_Loss=0.2563,local_loss: 0.10014183917153459,global_loss: 0.15612667798995972\n",
      "Step 10: BSNE_Loss=0.4698,local_loss: 0.06773321993797739,global_loss: 0.4021014869213104\n",
      "Step 11: BSNE_Loss=0.3793,local_loss: 0.1628821225438391,global_loss: 0.2164478451013565\n",
      "Step 12: BSNE_Loss=0.1337,local_loss: 0.06431160419245346,global_loss: 0.06936994940042496\n",
      "Center node 4 finished at step 13 with loss 0.1337\n",
      "\n",
      "=== Pretraining Epoch 5 (Center Node: 5) ===\n",
      "Step 0: BSNE_Loss=0.2812,local_loss: 0.13943542113803623,global_loss: 0.1418003886938095\n",
      "Step 1: BSNE_Loss=0.2142,local_loss: 0.1986712764706501,global_loss: 0.015507590025663376\n",
      "Step 2: BSNE_Loss=0.4630,local_loss: 0.1651953591160143,global_loss: 0.29783573746681213\n",
      "Step 3: BSNE_Loss=0.3925,local_loss: 0.1015086564309241,global_loss: 0.29095011949539185\n",
      "Step 4: BSNE_Loss=0.2777,local_loss: 0.16889223693948746,global_loss: 0.10882212221622467\n",
      "Step 5: BSNE_Loss=0.2401,local_loss: 0.06509377324147347,global_loss: 0.1750040501356125\n",
      "Step 6: BSNE_Loss=0.2858,local_loss: 0.17333796039536714,global_loss: 0.11241866648197174\n",
      "Step 7: BSNE_Loss=0.1452,local_loss: 0.11151858911089128,global_loss: 0.03364047035574913\n",
      "Center node 5 finished at step 8 with loss 0.1452\n",
      "\n",
      "=== Pretraining Epoch 6 (Center Node: 6) ===\n",
      "Step 0: BSNE_Loss=0.4453,local_loss: 0.07634645572936234,global_loss: 0.36895814538002014\n",
      "Step 1: BSNE_Loss=0.2288,local_loss: 0.08309037149819357,global_loss: 0.14569438993930817\n",
      "Step 2: BSNE_Loss=0.1074,local_loss: 0.13179995328970992,global_loss: -0.024379098787903786\n",
      "Center node 6 finished at step 3 with loss 0.1074\n",
      "\n",
      "=== Pretraining Epoch 7 (Center Node: 7) ===\n",
      "Step 0: BSNE_Loss=0.1177,local_loss: 0.11723225285305786,global_loss: 0.000497336674015969\n",
      "Center node 7 finished at step 1 with loss 0.1177\n",
      "\n",
      "=== Pretraining Epoch 8 (Center Node: 8) ===\n",
      "Step 0: BSNE_Loss=0.2968,local_loss: 0.0894953595160339,global_loss: 0.20734356343746185\n",
      "Step 1: BSNE_Loss=0.5819,local_loss: 0.053473367660390764,global_loss: 0.5284549593925476\n",
      "Step 2: BSNE_Loss=0.5143,local_loss: 0.07668199769501585,global_loss: 0.4376169741153717\n",
      "Step 3: BSNE_Loss=0.4563,local_loss: 0.17556921744480802,global_loss: 0.28075146675109863\n",
      "Step 4: BSNE_Loss=0.6358,local_loss: 0.06816289589132588,global_loss: 0.5676448941230774\n",
      "Step 5: BSNE_Loss=0.6350,local_loss: 0.03138527222624669,global_loss: 0.6036200523376465\n",
      "Step 6: BSNE_Loss=0.4463,local_loss: 0.08774484241541868,global_loss: 0.3585483431816101\n",
      "Step 7: BSNE_Loss=0.4626,local_loss: 0.06807093002361345,global_loss: 0.3944794535636902\n",
      "Step 8: BSNE_Loss=0.8266,local_loss: 0.015417755801605431,global_loss: 0.8111560344696045\n",
      "Step 9: BSNE_Loss=0.3653,local_loss: 0.08344246772663479,global_loss: 0.28180837631225586\n",
      "Step 10: BSNE_Loss=0.4219,local_loss: 0.04879642997720018,global_loss: 0.373137891292572\n",
      "Step 11: BSNE_Loss=0.4842,local_loss: 0.08922663795749458,global_loss: 0.39496538043022156\n",
      "Step 12: BSNE_Loss=0.4429,local_loss: 0.1112920031552423,global_loss: 0.33158764243125916\n",
      "Step 13: BSNE_Loss=0.5940,local_loss: 0.04889608238496045,global_loss: 0.545078456401825\n",
      "Step 14: BSNE_Loss=0.4566,local_loss: 0.024714369003351916,global_loss: 0.43193116784095764\n",
      "Step 15: BSNE_Loss=0.4358,local_loss: 0.02921277046327764,global_loss: 0.4066251218318939\n",
      "Step 16: BSNE_Loss=0.4853,local_loss: 0.08898468502836485,global_loss: 0.3963640630245209\n",
      "Step 17: BSNE_Loss=0.6428,local_loss: 0.16875078027483956,global_loss: 0.47400999069213867\n",
      "Step 18: BSNE_Loss=0.7264,local_loss: 0.009140561180117635,global_loss: 0.7172860503196716\n",
      "Step 19: BSNE_Loss=0.6589,local_loss: 0.11232721273110893,global_loss: 0.5465360879898071\n",
      "Step 20: BSNE_Loss=0.4709,local_loss: 0.050667154406752804,global_loss: 0.42027780413627625\n",
      "Step 21: BSNE_Loss=0.6584,local_loss: 0.08327508501622706,global_loss: 0.5751298666000366\n",
      "Step 22: BSNE_Loss=0.5568,local_loss: 0.04336445228526083,global_loss: 0.5134642124176025\n",
      "Step 23: BSNE_Loss=0.7455,local_loss: 0.12332925435714638,global_loss: 0.6221223473548889\n",
      "Step 24: BSNE_Loss=0.7933,local_loss: 0.06200862405489824,global_loss: 0.7312660813331604\n",
      "Step 25: BSNE_Loss=0.3645,local_loss: 0.06137381186603083,global_loss: 0.30317267775535583\n",
      "Step 26: BSNE_Loss=0.3624,local_loss: 0.10839637675588008,global_loss: 0.25400999188423157\n",
      "Step 27: BSNE_Loss=0.4832,local_loss: 0.04447047741839818,global_loss: 0.43872377276420593\n",
      "Step 28: BSNE_Loss=0.4052,local_loss: 0.1142040190920608,global_loss: 0.29100456833839417\n",
      "Step 29: BSNE_Loss=0.6904,local_loss: 0.0599181850563926,global_loss: 0.6304429769515991\n",
      "Step 30: BSNE_Loss=0.7413,local_loss: 0.08237724834718761,global_loss: 0.658945620059967\n",
      "Step 31: BSNE_Loss=0.5129,local_loss: 0.07107447908647074,global_loss: 0.44184014201164246\n",
      "Step 32: BSNE_Loss=0.6117,local_loss: 0.16843247161639416,global_loss: 0.4432324171066284\n",
      "Step 33: BSNE_Loss=0.5349,local_loss: 0.06595711293570476,global_loss: 0.46893906593322754\n",
      "Step 34: BSNE_Loss=0.7145,local_loss: 0.0608876722264366,global_loss: 0.6535995006561279\n",
      "Step 35: BSNE_Loss=0.3524,local_loss: 0.11107155399695887,global_loss: 0.24130618572235107\n",
      "Step 36: BSNE_Loss=0.4149,local_loss: 0.10233260004919703,global_loss: 0.3125741481781006\n",
      "Step 37: BSNE_Loss=0.7271,local_loss: 0.12300861525194787,global_loss: 0.6040964126586914\n",
      "Step 38: BSNE_Loss=0.5969,local_loss: 0.07538928020775752,global_loss: 0.5215005278587341\n",
      "Step 39: BSNE_Loss=0.5259,local_loss: 0.055745457003802054,global_loss: 0.47020435333251953\n",
      "Step 40: BSNE_Loss=0.4284,local_loss: 0.04435870386562808,global_loss: 0.38402050733566284\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 41: BSNE_Loss=0.4040,local_loss: 0.1617676451242907,global_loss: 0.24225348234176636\n",
      "Step 42: BSNE_Loss=0.4076,local_loss: 0.08062120647128129,global_loss: 0.3269824683666229\n",
      "Step 43: BSNE_Loss=0.5484,local_loss: 0.017710372522390275,global_loss: 0.5307269096374512\n",
      "Step 44: BSNE_Loss=0.4542,local_loss: 0.12160638793326622,global_loss: 0.3325483202934265\n",
      "Step 45: BSNE_Loss=0.5043,local_loss: 0.1365610849650127,global_loss: 0.36771833896636963\n",
      "Step 46: BSNE_Loss=0.3383,local_loss: 0.041261953030906344,global_loss: 0.29698991775512695\n",
      "Step 47: BSNE_Loss=0.6552,local_loss: 0.01598104823851508,global_loss: 0.6391717195510864\n",
      "Step 48: BSNE_Loss=0.6206,local_loss: 0.06307085182150156,global_loss: 0.5575310587882996\n",
      "Step 49: BSNE_Loss=0.3402,local_loss: 0.15168597827271071,global_loss: 0.18852800130844116\n",
      "Step 50: BSNE_Loss=0.6980,local_loss: 0.05555323051184157,global_loss: 0.6424650549888611\n",
      "Step 51: BSNE_Loss=0.7249,local_loss: 0.03451331382356087,global_loss: 0.6903942227363586\n",
      "Step 52: BSNE_Loss=0.3099,local_loss: 0.037458365047289124,global_loss: 0.27246734499931335\n",
      "Step 53: BSNE_Loss=0.7224,local_loss: 0.09595631903238573,global_loss: 0.6264137029647827\n",
      "Step 54: BSNE_Loss=0.4673,local_loss: 0.026576385205736103,global_loss: 0.44073083996772766\n",
      "Step 55: BSNE_Loss=0.6359,local_loss: 0.10867991738895555,global_loss: 0.527205228805542\n",
      "Step 56: BSNE_Loss=0.7185,local_loss: 0.012127968425134484,global_loss: 0.7063775658607483\n",
      "Step 57: BSNE_Loss=0.4117,local_loss: 0.03220308424938378,global_loss: 0.3795400857925415\n",
      "Step 58: BSNE_Loss=0.3462,local_loss: 0.053301859017620884,global_loss: 0.29292750358581543\n",
      "Step 59: BSNE_Loss=0.7119,local_loss: 0.11016572637471196,global_loss: 0.601729154586792\n",
      "Step 60: BSNE_Loss=0.5935,local_loss: 0.03355658954329687,global_loss: 0.5599123239517212\n",
      "Step 61: BSNE_Loss=0.6098,local_loss: 0.16136211714337784,global_loss: 0.44840240478515625\n",
      "Step 62: BSNE_Loss=0.4563,local_loss: 0.10756745014023204,global_loss: 0.34869906306266785\n",
      "Step 63: BSNE_Loss=0.5150,local_loss: 0.07328701641587274,global_loss: 0.44174709916114807\n",
      "Step 64: BSNE_Loss=0.3552,local_loss: 0.054309417836003314,global_loss: 0.3009187877178192\n",
      "Step 65: BSNE_Loss=0.3011,local_loss: 0.04494564790807381,global_loss: 0.2561895251274109\n",
      "Step 66: BSNE_Loss=0.3731,local_loss: 0.0521479359660623,global_loss: 0.32092371582984924\n",
      "Step 67: BSNE_Loss=0.4098,local_loss: 0.07330690098900923,global_loss: 0.33645349740982056\n",
      "Step 68: BSNE_Loss=0.3674,local_loss: 0.11683500429673573,global_loss: 0.2505793571472168\n",
      "Step 69: BSNE_Loss=0.5408,local_loss: 0.12411916599802654,global_loss: 0.4166956841945648\n",
      "Step 70: BSNE_Loss=0.5267,local_loss: 0.044338576898751425,global_loss: 0.48234623670578003\n",
      "Step 71: BSNE_Loss=0.4605,local_loss: 0.14176522894682814,global_loss: 0.31875669956207275\n",
      "Step 72: BSNE_Loss=0.4953,local_loss: 0.07232543965294429,global_loss: 0.42294690012931824\n",
      "Step 73: BSNE_Loss=0.7491,local_loss: 0.08740925714334338,global_loss: 0.6616407632827759\n",
      "Step 74: BSNE_Loss=0.4840,local_loss: 0.08093593408655489,global_loss: 0.40306636691093445\n",
      "Step 75: BSNE_Loss=0.6559,local_loss: 0.08913525384304163,global_loss: 0.5667171478271484\n",
      "Step 76: BSNE_Loss=0.6291,local_loss: 0.15459058278009338,global_loss: 0.47455769777297974\n",
      "Step 77: BSNE_Loss=0.4458,local_loss: 0.0939901984593322,global_loss: 0.3518446981906891\n",
      "Step 78: BSNE_Loss=0.4524,local_loss: 0.04733495124550709,global_loss: 0.40507936477661133\n",
      "Step 79: BSNE_Loss=0.6661,local_loss: -0.0008537064586848597,global_loss: 0.6669579148292542\n",
      "Step 80: BSNE_Loss=0.5872,local_loss: 0.07234458379029468,global_loss: 0.5148196220397949\n",
      "Step 81: BSNE_Loss=0.5354,local_loss: 0.07236822149309098,global_loss: 0.46307674050331116\n",
      "Step 82: BSNE_Loss=0.7236,local_loss: 0.207640660509777,global_loss: 0.5159177780151367\n",
      "Step 83: BSNE_Loss=0.4950,local_loss: 0.01838371503680647,global_loss: 0.4765874743461609\n",
      "Step 84: BSNE_Loss=0.5807,local_loss: 0.06673944982823629,global_loss: 0.5139971375465393\n",
      "Step 85: BSNE_Loss=0.5698,local_loss: 0.08257530479964913,global_loss: 0.4871921241283417\n",
      "Step 86: BSNE_Loss=0.6318,local_loss: -0.007731769475635586,global_loss: 0.6395404934883118\n",
      "Step 87: BSNE_Loss=0.3627,local_loss: 0.040107091759988445,global_loss: 0.32254597544670105\n",
      "Step 88: BSNE_Loss=0.4203,local_loss: 0.1393595321691688,global_loss: 0.28095173835754395\n",
      "Step 89: BSNE_Loss=0.5353,local_loss: 0.12642993207365755,global_loss: 0.4088805019855499\n",
      "Step 90: BSNE_Loss=0.5217,local_loss: 0.09299690555704604,global_loss: 0.4287184476852417\n",
      "Step 91: BSNE_Loss=0.6247,local_loss: 0.043760558570374516,global_loss: 0.5809763669967651\n",
      "Step 92: BSNE_Loss=0.5895,local_loss: 0.051224689480867086,global_loss: 0.5382506847381592\n",
      "Step 93: BSNE_Loss=0.7004,local_loss: 0.04394440513429581,global_loss: 0.6564587354660034\n",
      "Step 94: BSNE_Loss=0.6273,local_loss: 0.04495573206188741,global_loss: 0.5823321342468262\n",
      "Step 95: BSNE_Loss=0.5076,local_loss: 0.021868742260310787,global_loss: 0.4857255816459656\n",
      "Step 96: BSNE_Loss=0.5800,local_loss: 0.16206642683130912,global_loss: 0.41790786385536194\n",
      "Step 97: BSNE_Loss=0.2727,local_loss: 0.23812327129070315,global_loss: 0.03460961580276489\n",
      "Step 98: BSNE_Loss=0.7220,local_loss: 0.03064725553368522,global_loss: 0.6913501620292664\n",
      "Step 99: BSNE_Loss=0.6822,local_loss: 0.11122639660057783,global_loss: 0.5709975957870483\n",
      "Step 100: BSNE_Loss=0.5004,local_loss: 0.13419346134779556,global_loss: 0.3661902844905853\n",
      "Step 101: BSNE_Loss=0.5935,local_loss: 0.019116767824747343,global_loss: 0.5743836760520935\n",
      "Step 102: BSNE_Loss=0.7253,local_loss: 0.11614299590203246,global_loss: 0.6091277599334717\n",
      "Step 103: BSNE_Loss=0.3181,local_loss: 0.025160873259051586,global_loss: 0.2929033935070038\n",
      "Step 104: BSNE_Loss=0.4731,local_loss: 0.08163570097301018,global_loss: 0.39151132106781006\n",
      "Step 105: BSNE_Loss=0.5981,local_loss: 0.03305725956244439,global_loss: 0.5650562644004822\n",
      "Step 106: BSNE_Loss=0.4701,local_loss: 0.07970501062716838,global_loss: 0.39035096764564514\n",
      "Step 107: BSNE_Loss=0.3776,local_loss: 0.013257932273765833,global_loss: 0.36429375410079956\n",
      "Step 108: BSNE_Loss=0.2905,local_loss: 0.06159572662214191,global_loss: 0.2288602888584137\n",
      "Step 109: BSNE_Loss=0.5293,local_loss: 0.016313899438075975,global_loss: 0.5130335092544556\n",
      "Step 110: BSNE_Loss=0.4546,local_loss: 0.04896449554438048,global_loss: 0.40567734837532043\n",
      "Step 111: BSNE_Loss=0.6390,local_loss: 0.06384174671542461,global_loss: 0.5751611590385437\n",
      "Step 112: BSNE_Loss=0.6698,local_loss: 0.10037599678674645,global_loss: 0.5693874359130859\n",
      "Step 113: BSNE_Loss=0.5962,local_loss: 0.07870424399356397,global_loss: 0.5175299048423767\n",
      "Step 114: BSNE_Loss=0.4354,local_loss: 0.08400692213452868,global_loss: 0.3513542115688324\n",
      "Step 115: BSNE_Loss=0.5821,local_loss: 0.013810577082441516,global_loss: 0.5683271884918213\n",
      "Step 116: BSNE_Loss=0.3490,local_loss: -0.018216718983007345,global_loss: 0.36725637316703796\n",
      "Step 117: BSNE_Loss=0.5906,local_loss: 0.045536506455727765,global_loss: 0.5451134443283081\n",
      "Step 118: BSNE_Loss=0.6384,local_loss: 0.1196546790890163,global_loss: 0.5187755227088928\n",
      "Step 119: BSNE_Loss=0.6425,local_loss: 0.09928087431908245,global_loss: 0.5431733131408691\n",
      "Step 120: BSNE_Loss=0.8177,local_loss: 0.03634799814228955,global_loss: 0.7813378572463989\n",
      "Step 121: BSNE_Loss=0.5400,local_loss: 0.03297644401249737,global_loss: 0.5070197582244873\n",
      "Step 122: BSNE_Loss=0.5152,local_loss: 0.11237351932813866,global_loss: 0.4028474986553192\n",
      "Step 123: BSNE_Loss=0.5008,local_loss: 0.07660557809388886,global_loss: 0.4242064952850342\n",
      "Step 124: BSNE_Loss=0.6551,local_loss: 0.052103172331462194,global_loss: 0.6030122637748718\n",
      "Step 125: BSNE_Loss=0.5004,local_loss: 0.07014283613164396,global_loss: 0.43024083971977234\n",
      "Step 126: BSNE_Loss=0.5777,local_loss: 0.041538471721157225,global_loss: 0.5361689329147339\n",
      "Step 127: BSNE_Loss=0.7527,local_loss: 0.04498048731386278,global_loss: 0.7076865434646606\n",
      "Step 128: BSNE_Loss=0.6834,local_loss: 0.08229209646372089,global_loss: 0.6011506915092468\n",
      "Step 129: BSNE_Loss=0.4824,local_loss: 0.09360185342010696,global_loss: 0.3887731730937958\n",
      "Step 130: BSNE_Loss=0.3947,local_loss: 0.055112272643634753,global_loss: 0.3396206796169281\n",
      "Step 131: BSNE_Loss=0.6305,local_loss: 0.02279718097451571,global_loss: 0.6077444553375244\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 132: BSNE_Loss=0.6953,local_loss: 0.07438559072333688,global_loss: 0.6209273934364319\n",
      "Step 133: BSNE_Loss=0.6319,local_loss: 0.07393029811590618,global_loss: 0.5579910278320312\n",
      "Step 134: BSNE_Loss=0.6690,local_loss: 0.09226321267693478,global_loss: 0.5767655372619629\n",
      "Step 135: BSNE_Loss=0.4547,local_loss: 0.09004658528872433,global_loss: 0.364610880613327\n",
      "Step 136: BSNE_Loss=0.3668,local_loss: 0.05903666337632084,global_loss: 0.30772432684898376\n",
      "Step 137: BSNE_Loss=0.5558,local_loss: 0.05681289339879786,global_loss: 0.49893784523010254\n",
      "Step 138: BSNE_Loss=0.5664,local_loss: 0.036098231705777986,global_loss: 0.5302976369857788\n",
      "Step 139: BSNE_Loss=0.4577,local_loss: 0.07584102815718984,global_loss: 0.3818749487400055\n",
      "Step 140: BSNE_Loss=0.5951,local_loss: 0.14071752450624803,global_loss: 0.4543939530849457\n",
      "Step 141: BSNE_Loss=0.6977,local_loss: 0.08121263720290565,global_loss: 0.6164990663528442\n",
      "Step 142: BSNE_Loss=0.6071,local_loss: 0.1357052668046245,global_loss: 0.4713912606239319\n",
      "Step 143: BSNE_Loss=0.6921,local_loss: 0.0303072376197567,global_loss: 0.6617582440376282\n",
      "Step 144: BSNE_Loss=0.6580,local_loss: 0.04798562118422756,global_loss: 0.6100159883499146\n",
      "Step 145: BSNE_Loss=0.5402,local_loss: 0.08217730892682568,global_loss: 0.4580205976963043\n",
      "Step 146: BSNE_Loss=0.4835,local_loss: 0.05555055960303179,global_loss: 0.42795759439468384\n",
      "Step 147: BSNE_Loss=0.5345,local_loss: 0.03698610455398676,global_loss: 0.4974639415740967\n",
      "Step 148: BSNE_Loss=0.6047,local_loss: 0.011418806336651438,global_loss: 0.5933064222335815\n",
      "Step 149: BSNE_Loss=0.6199,local_loss: 0.09670482499726644,global_loss: 0.5232067108154297\n",
      "Step 150: BSNE_Loss=0.7128,local_loss: 0.08392672741186402,global_loss: 0.6288452744483948\n",
      "Step 151: BSNE_Loss=0.4397,local_loss: 0.038404613332500476,global_loss: 0.4012530744075775\n",
      "Step 152: BSNE_Loss=0.5343,local_loss: 0.028852833670827395,global_loss: 0.505430281162262\n",
      "Step 153: BSNE_Loss=0.6481,local_loss: 0.06168311855584888,global_loss: 0.5864071846008301\n",
      "Step 154: BSNE_Loss=0.4920,local_loss: 0.03799829873906038,global_loss: 0.45402926206588745\n",
      "Step 155: BSNE_Loss=0.6633,local_loss: -0.014715145766337532,global_loss: 0.6779695153236389\n",
      "Step 156: BSNE_Loss=0.4026,local_loss: 0.04826089433405091,global_loss: 0.35431182384490967\n",
      "Step 157: BSNE_Loss=0.7191,local_loss: 0.0626432048549625,global_loss: 0.6564123630523682\n",
      "Step 158: BSNE_Loss=0.4892,local_loss: 0.07751530617424593,global_loss: 0.4117306172847748\n",
      "Step 159: BSNE_Loss=0.5414,local_loss: 0.07250528192160688,global_loss: 0.4688482880592346\n",
      "Step 160: BSNE_Loss=0.3563,local_loss: 0.0415231226386667,global_loss: 0.31478166580200195\n",
      "Step 161: BSNE_Loss=0.4084,local_loss: 0.024469756307262554,global_loss: 0.3839765787124634\n",
      "Step 162: BSNE_Loss=0.5886,local_loss: 0.06236283039920615,global_loss: 0.5262254476547241\n",
      "Step 163: BSNE_Loss=0.4437,local_loss: 0.02587555100904018,global_loss: 0.4178290367126465\n",
      "Step 164: BSNE_Loss=0.4585,local_loss: 0.035058029096265354,global_loss: 0.42345064878463745\n",
      "Step 165: BSNE_Loss=0.5768,local_loss: 0.010203839366184112,global_loss: 0.5665497183799744\n",
      "Step 166: BSNE_Loss=0.5260,local_loss: 0.0437849937656833,global_loss: 0.48223280906677246\n",
      "Step 167: BSNE_Loss=0.3464,local_loss: -0.0007415180601992133,global_loss: 0.3471682667732239\n",
      "Step 168: BSNE_Loss=0.4196,local_loss: 0.10474919380635944,global_loss: 0.3148069977760315\n",
      "Step 169: BSNE_Loss=0.4327,local_loss: 0.027883116110501333,global_loss: 0.4048527479171753\n",
      "Step 170: BSNE_Loss=0.5083,local_loss: -0.016583170585353545,global_loss: 0.5249253511428833\n",
      "Step 171: BSNE_Loss=0.7508,local_loss: 0.04279068724562767,global_loss: 0.7079713940620422\n",
      "Step 172: BSNE_Loss=0.3567,local_loss: 0.08860559535766598,global_loss: 0.26806268095970154\n",
      "Step 173: BSNE_Loss=0.7008,local_loss: 0.09988663391134445,global_loss: 0.6009057760238647\n",
      "Step 174: BSNE_Loss=0.6606,local_loss: 0.07463877339693961,global_loss: 0.5859646797180176\n",
      "Step 175: BSNE_Loss=0.6118,local_loss: 0.02423142297027689,global_loss: 0.5876173377037048\n",
      "Step 176: BSNE_Loss=0.8265,local_loss: 0.024910328296009948,global_loss: 0.8016137480735779\n",
      "Step 177: BSNE_Loss=0.4683,local_loss: 0.1100709108282445,global_loss: 0.358216792345047\n",
      "Step 178: BSNE_Loss=0.5503,local_loss: 0.059998248940252255,global_loss: 0.4903072714805603\n",
      "Step 179: BSNE_Loss=0.5232,local_loss: -0.008712067079613216,global_loss: 0.5319468379020691\n",
      "Step 180: BSNE_Loss=0.4720,local_loss: 0.04687936143341197,global_loss: 0.42513251304626465\n",
      "Step 181: BSNE_Loss=0.4836,local_loss: 0.010663625473261511,global_loss: 0.472923219203949\n",
      "Step 182: BSNE_Loss=0.5984,local_loss: 0.018386218888829704,global_loss: 0.5800453424453735\n",
      "Step 183: BSNE_Loss=0.4881,local_loss: 0.16119802045738107,global_loss: 0.3268888592720032\n",
      "Step 184: BSNE_Loss=0.3736,local_loss: 0.00359496554517138,global_loss: 0.3700047731399536\n",
      "Step 185: BSNE_Loss=0.3668,local_loss: 0.02178720360928481,global_loss: 0.3450230658054352\n",
      "Step 186: BSNE_Loss=0.5586,local_loss: 0.04725548895361579,global_loss: 0.5113604068756104\n",
      "Step 187: BSNE_Loss=0.4527,local_loss: 0.024056569339436732,global_loss: 0.42868444323539734\n",
      "Step 188: BSNE_Loss=0.4427,local_loss: 0.01801958649422701,global_loss: 0.4246724247932434\n",
      "Step 189: BSNE_Loss=0.6397,local_loss: 0.0573418748089724,global_loss: 0.582317590713501\n",
      "Step 190: BSNE_Loss=0.5413,local_loss: 0.013096532779259179,global_loss: 0.5281696319580078\n",
      "Step 191: BSNE_Loss=0.4075,local_loss: 0.03192098837698911,global_loss: 0.3755515217781067\n",
      "Step 192: BSNE_Loss=0.5764,local_loss: 0.03937183331722819,global_loss: 0.5370092988014221\n",
      "Step 193: BSNE_Loss=0.4441,local_loss: 0.004011323756891993,global_loss: 0.44006869196891785\n",
      "Step 194: BSNE_Loss=0.4493,local_loss: 0.03581386269762152,global_loss: 0.41352564096450806\n",
      "Step 195: BSNE_Loss=0.4781,local_loss: 0.09626920058128445,global_loss: 0.3818196952342987\n",
      "Step 196: BSNE_Loss=0.4485,local_loss: 0.06723952125317853,global_loss: 0.3812509775161743\n",
      "Step 197: BSNE_Loss=0.3648,local_loss: 0.07806033182249483,global_loss: 0.2867830693721771\n",
      "Step 198: BSNE_Loss=0.5181,local_loss: 0.09883372111346475,global_loss: 0.41929152607917786\n",
      "Step 199: BSNE_Loss=0.3406,local_loss: 0.024038295120691184,global_loss: 0.31659093499183655\n",
      "Step 200: BSNE_Loss=0.4625,local_loss: 0.11620273710594503,global_loss: 0.34634149074554443\n",
      "Step 201: BSNE_Loss=0.7820,local_loss: 0.04314816009844666,global_loss: 0.7388418316841125\n",
      "Step 202: BSNE_Loss=0.5769,local_loss: 0.022980608832574675,global_loss: 0.5539205074310303\n",
      "Step 203: BSNE_Loss=0.4778,local_loss: 0.044443831402686006,global_loss: 0.43336352705955505\n",
      "Step 204: BSNE_Loss=0.4999,local_loss: -0.016243535756615746,global_loss: 0.5161861181259155\n",
      "Step 205: BSNE_Loss=0.3531,local_loss: 0.058464655265990324,global_loss: 0.29467737674713135\n",
      "Step 206: BSNE_Loss=0.6562,local_loss: 0.14410643353617392,global_loss: 0.5120627284049988\n",
      "Step 207: BSNE_Loss=0.4672,local_loss: 0.08517513598881489,global_loss: 0.3819960355758667\n",
      "Step 208: BSNE_Loss=0.5309,local_loss: -0.023552196263847105,global_loss: 0.5544577836990356\n",
      "Step 209: BSNE_Loss=0.3403,local_loss: 0.05993621865141073,global_loss: 0.28036993741989136\n",
      "Step 210: BSNE_Loss=0.6580,local_loss: 0.026762588973606984,global_loss: 0.631197452545166\n",
      "Step 211: BSNE_Loss=0.4396,local_loss: 0.0012986799934412297,global_loss: 0.4382724463939667\n",
      "Step 212: BSNE_Loss=0.5766,local_loss: 0.09579706767563524,global_loss: 0.4807561933994293\n",
      "Step 213: BSNE_Loss=0.6271,local_loss: 0.03601952940503392,global_loss: 0.591090977191925\n",
      "Step 214: BSNE_Loss=0.6393,local_loss: 0.10705307635306702,global_loss: 0.5322842001914978\n",
      "Step 215: BSNE_Loss=0.4510,local_loss: 0.06300248593259536,global_loss: 0.38802966475486755\n",
      "Step 216: BSNE_Loss=0.4101,local_loss: 0.05592102838390492,global_loss: 0.3541411757469177\n",
      "Step 217: BSNE_Loss=0.5916,local_loss: 0.07870025073436017,global_loss: 0.5129045248031616\n",
      "Step 218: BSNE_Loss=0.3837,local_loss: -0.00016643940203693754,global_loss: 0.3839038908481598\n",
      "Step 219: BSNE_Loss=0.5185,local_loss: 0.11357837409431235,global_loss: 0.4048731029033661\n",
      "Step 220: BSNE_Loss=0.5621,local_loss: 0.018704381541380207,global_loss: 0.54341721534729\n",
      "Step 221: BSNE_Loss=0.4848,local_loss: 0.11306141195029047,global_loss: 0.37171968817710876\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 222: BSNE_Loss=0.4424,local_loss: 0.03227585581766881,global_loss: 0.41014036536216736\n",
      "Step 223: BSNE_Loss=0.5172,local_loss: -0.017377475701996756,global_loss: 0.5345396995544434\n",
      "Step 224: BSNE_Loss=0.5362,local_loss: 0.01679081020087761,global_loss: 0.5193906426429749\n",
      "Step 225: BSNE_Loss=0.3097,local_loss: 0.047682027908507396,global_loss: 0.26197460293769836\n",
      "Step 226: BSNE_Loss=0.3785,local_loss: 0.028436433731301128,global_loss: 0.35010799765586853\n",
      "Step 227: BSNE_Loss=0.4578,local_loss: 0.04975866244242732,global_loss: 0.40803948044776917\n",
      "Step 228: BSNE_Loss=0.6038,local_loss: 0.02659219270310721,global_loss: 0.5771817564964294\n",
      "Step 229: BSNE_Loss=0.4454,local_loss: 0.037477541880857515,global_loss: 0.40792351961135864\n",
      "Step 230: BSNE_Loss=0.5039,local_loss: -0.02737157950934821,global_loss: 0.5312216877937317\n",
      "Step 231: BSNE_Loss=0.9013,local_loss: 0.03829776236037767,global_loss: 0.862953782081604\n",
      "Step 232: BSNE_Loss=0.5383,local_loss: 0.0426334642127976,global_loss: 0.4956304430961609\n",
      "Step 233: BSNE_Loss=0.5630,local_loss: 0.03326426554947723,global_loss: 0.5297098159790039\n",
      "Step 234: BSNE_Loss=0.5357,local_loss: 0.01804815762548051,global_loss: 0.5176153779029846\n",
      "Step 235: BSNE_Loss=0.7411,local_loss: 0.040732381427162415,global_loss: 0.7004088759422302\n",
      "Step 236: BSNE_Loss=0.4200,local_loss: 0.06126128565061269,global_loss: 0.35870683193206787\n",
      "Step 237: BSNE_Loss=0.3240,local_loss: 0.11972739844083577,global_loss: 0.2042732685804367\n",
      "Step 238: BSNE_Loss=0.2887,local_loss: 0.016417835923086276,global_loss: 0.2722748816013336\n",
      "Step 239: BSNE_Loss=0.3036,local_loss: 0.013867116360300998,global_loss: 0.2897116243839264\n",
      "Step 240: BSNE_Loss=0.5660,local_loss: 0.12969911130898554,global_loss: 0.4363177716732025\n",
      "Step 241: BSNE_Loss=0.5067,local_loss: 0.007496753005099875,global_loss: 0.4991621971130371\n",
      "Step 242: BSNE_Loss=0.5220,local_loss: 0.015650748718014106,global_loss: 0.5063701868057251\n",
      "Step 243: BSNE_Loss=0.5787,local_loss: -0.008307901605895599,global_loss: 0.5869860053062439\n",
      "Step 244: BSNE_Loss=0.6367,local_loss: 0.014820048727771802,global_loss: 0.6218467354774475\n",
      "Step 245: BSNE_Loss=0.4249,local_loss: 0.0353446742222358,global_loss: 0.38960322737693787\n",
      "Step 246: BSNE_Loss=0.5618,local_loss: 0.0349624012017084,global_loss: 0.5268151760101318\n",
      "Step 247: BSNE_Loss=0.5911,local_loss: 0.07759905270934876,global_loss: 0.5134848356246948\n",
      "Step 248: BSNE_Loss=0.6770,local_loss: -0.012410824784168408,global_loss: 0.6894052028656006\n",
      "Step 249: BSNE_Loss=0.6893,local_loss: -0.006933004386141733,global_loss: 0.6962777376174927\n",
      "Step 250: BSNE_Loss=0.7769,local_loss: 0.0016110395531356402,global_loss: 0.7752463221549988\n",
      "Step 251: BSNE_Loss=0.4060,local_loss: -0.010229603618921229,global_loss: 0.41618475317955017\n",
      "Step 252: BSNE_Loss=0.5431,local_loss: 0.020964359133442294,global_loss: 0.522182822227478\n",
      "Step 253: BSNE_Loss=0.4878,local_loss: 0.11296567489359945,global_loss: 0.3748796582221985\n",
      "Step 254: BSNE_Loss=0.3789,local_loss: 0.06947434439078759,global_loss: 0.30946600437164307\n",
      "Step 255: BSNE_Loss=0.6798,local_loss: 0.10441768085257133,global_loss: 0.5753960013389587\n",
      "Step 256: BSNE_Loss=0.6300,local_loss: 0.07193248681579328,global_loss: 0.5580806732177734\n",
      "Step 257: BSNE_Loss=0.3073,local_loss: 0.03182970478250763,global_loss: 0.27551937103271484\n",
      "Step 258: BSNE_Loss=0.2823,local_loss: 0.10365200469196383,global_loss: 0.1786039173603058\n",
      "Step 259: BSNE_Loss=0.2432,local_loss: 0.14520091244660546,global_loss: 0.0979931429028511\n",
      "Step 260: BSNE_Loss=0.5381,local_loss: 0.03918086119543822,global_loss: 0.498898983001709\n",
      "Step 261: BSNE_Loss=0.5940,local_loss: 0.01334658922395938,global_loss: 0.5806466937065125\n",
      "Step 262: BSNE_Loss=0.3370,local_loss: 0.11459732936086685,global_loss: 0.22243453562259674\n",
      "Step 263: BSNE_Loss=0.5113,local_loss: 0.08493900232033713,global_loss: 0.42632851004600525\n",
      "Step 264: BSNE_Loss=0.4508,local_loss: 0.09124224392903664,global_loss: 0.3595973253250122\n",
      "Step 265: BSNE_Loss=0.5415,local_loss: 0.03465938213137083,global_loss: 0.50681471824646\n",
      "Step 266: BSNE_Loss=0.5616,local_loss: 0.030511602304219013,global_loss: 0.5311211347579956\n",
      "Step 267: BSNE_Loss=0.5441,local_loss: 0.013115676747859024,global_loss: 0.5310333967208862\n",
      "Step 268: BSNE_Loss=0.6715,local_loss: 0.0825297045196082,global_loss: 0.5889980792999268\n",
      "Step 269: BSNE_Loss=0.7004,local_loss: 0.08764561836639402,global_loss: 0.6127299070358276\n",
      "Step 270: BSNE_Loss=0.7491,local_loss: 0.042298084796512696,global_loss: 0.7067667841911316\n",
      "Step 271: BSNE_Loss=0.6691,local_loss: 0.04872817852597397,global_loss: 0.6203284859657288\n",
      "Step 272: BSNE_Loss=0.4844,local_loss: 0.07254396372663485,global_loss: 0.411842942237854\n",
      "Step 273: BSNE_Loss=0.3274,local_loss: 0.052729974718242145,global_loss: 0.27462074160575867\n",
      "Step 274: BSNE_Loss=0.3151,local_loss: 0.12015651901192875,global_loss: 0.19495747983455658\n",
      "Step 275: BSNE_Loss=0.4266,local_loss: -0.003941691365024107,global_loss: 0.4305538535118103\n",
      "Step 276: BSNE_Loss=0.5298,local_loss: 0.052586078739288535,global_loss: 0.4772056043148041\n",
      "Step 277: BSNE_Loss=0.5518,local_loss: 0.026644056364058456,global_loss: 0.5251191258430481\n",
      "Step 278: BSNE_Loss=0.2934,local_loss: 0.05572731379179527,global_loss: 0.23767352104187012\n",
      "Step 279: BSNE_Loss=0.5959,local_loss: 0.017261271905407712,global_loss: 0.5786529779434204\n",
      "Step 280: BSNE_Loss=0.5702,local_loss: 0.07492640829842004,global_loss: 0.4953196346759796\n",
      "Step 281: BSNE_Loss=0.2441,local_loss: 0.09028048731744265,global_loss: 0.15381963551044464\n",
      "Step 282: BSNE_Loss=0.5405,local_loss: -0.006399133581486991,global_loss: 0.5469250679016113\n",
      "Step 283: BSNE_Loss=0.4586,local_loss: 0.062207282045537736,global_loss: 0.39638885855674744\n",
      "Step 284: BSNE_Loss=0.7288,local_loss: 0.0025308994149719146,global_loss: 0.7262378931045532\n",
      "Step 285: BSNE_Loss=0.7179,local_loss: 0.09346355744821906,global_loss: 0.6244522333145142\n",
      "Step 286: BSNE_Loss=0.1759,local_loss: 0.05534319845795405,global_loss: 0.12053779512643814\n",
      "Center node 8 finished at step 287 with loss 0.1759\n",
      "\n",
      "=== Pretraining Epoch 9 (Center Node: 9) ===\n",
      "Step 0: BSNE_Loss=0.2565,local_loss: 0.196775430121712,global_loss: 0.05973472818732262\n",
      "Step 1: BSNE_Loss=0.3909,local_loss: 0.03287752441003125,global_loss: 0.3580687344074249\n",
      "Step 2: BSNE_Loss=0.4075,local_loss: 0.1013401039495398,global_loss: 0.3061937987804413\n",
      "Step 3: BSNE_Loss=0.3312,local_loss: 0.050536656028117126,global_loss: 0.2806324362754822\n",
      "Step 4: BSNE_Loss=0.5326,local_loss: 0.10298195007783456,global_loss: 0.42962539196014404\n",
      "Step 5: BSNE_Loss=0.0533,local_loss: 0.08424359439845532,global_loss: -0.03090588189661503\n",
      "Center node 9 finished at step 6 with loss 0.0533\n",
      "\n",
      "=== Pretraining Epoch 10 (Center Node: 10) ===\n",
      "Step 0: BSNE_Loss=0.3144,local_loss: 0.13195947263945298,global_loss: 0.1824360340833664\n",
      "Step 1: BSNE_Loss=-0.1428,local_loss: 0.19607503652003833,global_loss: -0.3389061391353607\n",
      "Center node 10 finished at step 2 with loss -0.1428\n",
      "\n",
      "=== Pretraining Epoch 11 (Center Node: 11) ===\n",
      "Step 0: BSNE_Loss=0.3919,local_loss: 0.19863417685163,global_loss: 0.19323858618736267\n",
      "Step 1: BSNE_Loss=0.4982,local_loss: 0.25847852034244734,global_loss: 0.2397238165140152\n",
      "Step 2: BSNE_Loss=0.3948,local_loss: 0.22351568469034397,global_loss: 0.1712498664855957\n",
      "Step 3: BSNE_Loss=0.2412,local_loss: 0.17469115987559022,global_loss: 0.06650928407907486\n",
      "Step 4: BSNE_Loss=0.5732,local_loss: 0.1547772340922894,global_loss: 0.418405681848526\n",
      "Step 5: BSNE_Loss=0.4424,local_loss: 0.09386631880704528,global_loss: 0.3485737144947052\n",
      "Step 6: BSNE_Loss=0.5434,local_loss: 0.21152062475963274,global_loss: 0.3318902254104614\n",
      "Step 7: BSNE_Loss=0.4695,local_loss: 0.11035573581577478,global_loss: 0.359125554561615\n",
      "Step 8: BSNE_Loss=0.5371,local_loss: 0.1893755607228951,global_loss: 0.3477184772491455\n",
      "Step 9: BSNE_Loss=0.5335,local_loss: 0.17202032203769207,global_loss: 0.3614963889122009\n",
      "Step 10: BSNE_Loss=0.5120,local_loss: 0.14684298651985678,global_loss: 0.36518731713294983\n",
      "Step 11: BSNE_Loss=0.3319,local_loss: 0.187043728138446,global_loss: 0.14489060640335083\n",
      "Step 12: BSNE_Loss=0.5167,local_loss: 0.13311303811930486,global_loss: 0.383553683757782\n",
      "Step 13: BSNE_Loss=0.5361,local_loss: 0.1578874122118794,global_loss: 0.3782269358634949\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 14: BSNE_Loss=0.3656,local_loss: 0.16721330594812991,global_loss: 0.19836318492889404\n",
      "Step 15: BSNE_Loss=0.4162,local_loss: 0.11989476890622762,global_loss: 0.2963004410266876\n",
      "Step 16: BSNE_Loss=0.4449,local_loss: 0.12660780299855412,global_loss: 0.3183065950870514\n",
      "Step 17: BSNE_Loss=0.5295,local_loss: 0.202819102368579,global_loss: 0.32664328813552856\n",
      "Step 18: BSNE_Loss=0.5177,local_loss: 0.18338260312923338,global_loss: 0.33436253666877747\n",
      "Step 19: BSNE_Loss=0.6300,local_loss: 0.12736047470677647,global_loss: 0.5026470422744751\n",
      "Step 20: BSNE_Loss=0.5139,local_loss: 0.13225513058377275,global_loss: 0.3816925883293152\n",
      "Step 21: BSNE_Loss=0.5488,local_loss: 0.11243536310004924,global_loss: 0.4363270103931427\n",
      "Step 22: BSNE_Loss=0.5196,local_loss: 0.14836149498370424,global_loss: 0.3712405264377594\n",
      "Step 23: BSNE_Loss=0.3403,local_loss: 0.12478582793574491,global_loss: 0.21547894179821014\n",
      "Step 24: BSNE_Loss=0.4805,local_loss: 0.22019607883433323,global_loss: 0.26030272245407104\n",
      "Step 25: BSNE_Loss=0.3730,local_loss: 0.14810162242502697,global_loss: 0.22493180632591248\n",
      "Step 26: BSNE_Loss=0.6836,local_loss: 0.15795380839753725,global_loss: 0.5256340503692627\n",
      "Step 27: BSNE_Loss=0.5029,local_loss: 0.24197588064474052,global_loss: 0.2608988881111145\n",
      "Step 28: BSNE_Loss=0.3458,local_loss: 0.1745911689083042,global_loss: 0.17125579714775085\n",
      "Step 29: BSNE_Loss=0.4035,local_loss: 0.11309054527525587,global_loss: 0.2904212474822998\n",
      "Step 30: BSNE_Loss=0.4866,local_loss: 0.20972119567719325,global_loss: 0.276887983083725\n",
      "Step 31: BSNE_Loss=0.3607,local_loss: 0.09418215551559879,global_loss: 0.26654860377311707\n",
      "Step 32: BSNE_Loss=0.3839,local_loss: 0.1406908881282908,global_loss: 0.2431694120168686\n",
      "Step 33: BSNE_Loss=0.4908,local_loss: 0.13931844298316579,global_loss: 0.35145413875579834\n",
      "Step 34: BSNE_Loss=0.5328,local_loss: 0.14936281591094006,global_loss: 0.383395254611969\n",
      "Step 35: BSNE_Loss=0.4553,local_loss: 0.14159458729491634,global_loss: 0.31371235847473145\n",
      "Step 36: BSNE_Loss=0.4311,local_loss: 0.18007337120020536,global_loss: 0.2509985566139221\n",
      "Step 37: BSNE_Loss=0.2878,local_loss: 0.17338787864409289,global_loss: 0.11438164860010147\n",
      "Step 38: BSNE_Loss=0.6728,local_loss: 0.1159360464472549,global_loss: 0.5568978786468506\n",
      "Step 39: BSNE_Loss=0.4718,local_loss: 0.16688150647558483,global_loss: 0.30488669872283936\n",
      "Step 40: BSNE_Loss=0.6013,local_loss: 0.2271648736445518,global_loss: 0.3741149306297302\n",
      "Step 41: BSNE_Loss=0.3423,local_loss: 0.21167598269940008,global_loss: 0.13057410717010498\n",
      "Step 42: BSNE_Loss=0.5206,local_loss: 0.14313649249185617,global_loss: 0.37748363614082336\n",
      "Step 43: BSNE_Loss=0.3072,local_loss: 0.16398967629931072,global_loss: 0.1431684046983719\n",
      "Step 44: BSNE_Loss=0.5077,local_loss: 0.12790046945173214,global_loss: 0.37977609038352966\n",
      "Step 45: BSNE_Loss=0.4378,local_loss: 0.2174620208625585,global_loss: 0.22033657133579254\n",
      "Step 46: BSNE_Loss=0.6876,local_loss: 0.14965212497020985,global_loss: 0.5378994345664978\n",
      "Step 47: BSNE_Loss=0.5970,local_loss: 0.14234089043958506,global_loss: 0.45464909076690674\n",
      "Step 48: BSNE_Loss=0.5154,local_loss: 0.11088721814809444,global_loss: 0.4045597314834595\n",
      "Step 49: BSNE_Loss=0.3094,local_loss: 0.12695057899682133,global_loss: 0.18249830603599548\n",
      "Step 50: BSNE_Loss=0.4069,local_loss: 0.16861375750072302,global_loss: 0.23831020295619965\n",
      "Step 51: BSNE_Loss=0.4510,local_loss: 0.1239405364056153,global_loss: 0.3271092474460602\n",
      "Step 52: BSNE_Loss=0.4834,local_loss: 0.11352670262051469,global_loss: 0.3698752224445343\n",
      "Step 53: BSNE_Loss=0.7802,local_loss: 0.1333978611334735,global_loss: 0.6467961072921753\n",
      "Step 54: BSNE_Loss=0.5497,local_loss: 0.09563274896370015,global_loss: 0.4540718197822571\n",
      "Step 55: BSNE_Loss=0.5213,local_loss: 0.1137749830056064,global_loss: 0.4075331687927246\n",
      "Step 56: BSNE_Loss=0.5498,local_loss: 0.08005131886741525,global_loss: 0.46973493695259094\n",
      "Step 57: BSNE_Loss=0.5772,local_loss: 0.14971471637871928,global_loss: 0.4274848997592926\n",
      "Step 58: BSNE_Loss=0.4431,local_loss: 0.13423300719163658,global_loss: 0.308914452791214\n",
      "Step 59: BSNE_Loss=0.1825,local_loss: 0.11584923097955932,global_loss: 0.06668081134557724\n",
      "Center node 11 finished at step 60 with loss 0.1825\n",
      "\n",
      "=== Pretraining Epoch 12 (Center Node: 12) ===\n",
      "Step 0: BSNE_Loss=0.1851,local_loss: 0.09234577695438338,global_loss: 0.09274912625551224\n",
      "Center node 12 finished at step 1 with loss 0.1851\n",
      "\n",
      "=== Pretraining Epoch 13 (Center Node: 13) ===\n",
      "Step 0: BSNE_Loss=0.7337,local_loss: 0.10193416623485992,global_loss: 0.6317185759544373\n",
      "Step 1: BSNE_Loss=0.5655,local_loss: 0.06774545228391213,global_loss: 0.49773138761520386\n",
      "Step 2: BSNE_Loss=0.3423,local_loss: 0.07208085970619017,global_loss: 0.2702275514602661\n",
      "Step 3: BSNE_Loss=0.4370,local_loss: 0.08906966928959772,global_loss: 0.3478917181491852\n",
      "Step 4: BSNE_Loss=0.5776,local_loss: 0.04826291155905346,global_loss: 0.529329776763916\n",
      "Step 5: BSNE_Loss=0.5941,local_loss: 0.08050962930917137,global_loss: 0.5136362314224243\n",
      "Step 6: BSNE_Loss=0.4904,local_loss: 0.04214133204204032,global_loss: 0.44826024770736694\n",
      "Step 7: BSNE_Loss=0.5826,local_loss: 0.1476833815422169,global_loss: 0.4348830580711365\n",
      "Step 8: BSNE_Loss=0.4885,local_loss: 0.11031180878946632,global_loss: 0.3781476318836212\n",
      "Step 9: BSNE_Loss=0.4822,local_loss: 0.07826620636974807,global_loss: 0.403961718082428\n",
      "Step 10: BSNE_Loss=0.5760,local_loss: 0.09423539047789657,global_loss: 0.48174533247947693\n",
      "Step 11: BSNE_Loss=0.6401,local_loss: 0.04377040845941361,global_loss: 0.5963326096534729\n",
      "Step 12: BSNE_Loss=0.7711,local_loss: 0.09997452188072903,global_loss: 0.6711347699165344\n",
      "Step 13: BSNE_Loss=0.4940,local_loss: 0.07944264258384628,global_loss: 0.4145689606666565\n",
      "Step 14: BSNE_Loss=0.3847,local_loss: 0.047202100186173636,global_loss: 0.337543249130249\n",
      "Step 15: BSNE_Loss=0.6289,local_loss: 0.05650852758219461,global_loss: 0.5723509788513184\n",
      "Step 16: BSNE_Loss=0.4401,local_loss: 0.060380492373890225,global_loss: 0.3797364830970764\n",
      "Step 17: BSNE_Loss=0.6461,local_loss: 0.09684753871625205,global_loss: 0.5493001341819763\n",
      "Step 18: BSNE_Loss=0.2993,local_loss: 0.0961000606135993,global_loss: 0.20317254960536957\n",
      "Step 19: BSNE_Loss=0.3123,local_loss: 0.07288547730191088,global_loss: 0.23945635557174683\n",
      "Step 20: BSNE_Loss=0.4170,local_loss: 0.09405535774336196,global_loss: 0.3229345381259918\n",
      "Step 21: BSNE_Loss=0.6263,local_loss: 0.11960985840862969,global_loss: 0.5067375898361206\n",
      "Step 22: BSNE_Loss=0.5006,local_loss: 0.13919100510816523,global_loss: 0.3613865077495575\n",
      "Step 23: BSNE_Loss=0.6458,local_loss: 0.08614739756103271,global_loss: 0.5596080422401428\n",
      "Step 24: BSNE_Loss=0.4272,local_loss: 0.10662423319138618,global_loss: 0.3205653727054596\n",
      "Step 25: BSNE_Loss=0.2804,local_loss: 0.054311942982557045,global_loss: 0.226093590259552\n",
      "Step 26: BSNE_Loss=0.2709,local_loss: 0.07358187531844693,global_loss: 0.19735799729824066\n",
      "Step 27: BSNE_Loss=0.5032,local_loss: 0.0646023349211162,global_loss: 0.438570111989975\n",
      "Step 28: BSNE_Loss=0.3503,local_loss: 0.23319591624979336,global_loss: 0.11706073582172394\n",
      "Step 29: BSNE_Loss=0.5109,local_loss: 0.10464963622442262,global_loss: 0.40627485513687134\n",
      "Step 30: BSNE_Loss=0.1669,local_loss: 0.1518329966111233,global_loss: 0.0150444395840168\n",
      "Center node 13 finished at step 31 with loss 0.1669\n",
      "\n",
      "=== Pretraining Epoch 14 (Center Node: 14) ===\n",
      "Step 0: BSNE_Loss=0.2547,local_loss: 0.1335543866643653,global_loss: 0.12112230807542801\n",
      "Step 1: BSNE_Loss=0.6132,local_loss: 0.22515335863370534,global_loss: 0.3880314528942108\n",
      "Step 2: BSNE_Loss=0.3863,local_loss: 0.14399152550408884,global_loss: 0.24229277670383453\n",
      "Step 3: BSNE_Loss=0.3772,local_loss: 0.15648408914769824,global_loss: 0.22067993879318237\n",
      "Step 4: BSNE_Loss=0.4445,local_loss: 0.17676115496791617,global_loss: 0.26775965094566345\n",
      "Step 5: BSNE_Loss=0.1812,local_loss: 0.15466173180448478,global_loss: 0.02652672864496708\n",
      "Center node 14 finished at step 6 with loss 0.1812\n",
      "\n",
      "=== Pretraining Epoch 15 (Center Node: 15) ===\n",
      "Step 0: BSNE_Loss=0.4300,local_loss: 0.09221315993970455,global_loss: 0.3377823233604431\n",
      "Step 1: BSNE_Loss=0.3092,local_loss: 0.04076770278019474,global_loss: 0.26841282844543457\n",
      "Step 2: BSNE_Loss=0.4293,local_loss: 0.06160800876244727,global_loss: 0.3677127957344055\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 3: BSNE_Loss=0.5100,local_loss: 0.016687649414518363,global_loss: 0.4933035671710968\n",
      "Step 4: BSNE_Loss=0.5789,local_loss: 0.0938327935603247,global_loss: 0.48508644104003906\n",
      "Step 5: BSNE_Loss=0.4215,local_loss: 0.038791465211856044,global_loss: 0.38266393542289734\n",
      "Step 6: BSNE_Loss=0.4363,local_loss: 0.054179824271925484,global_loss: 0.38212791085243225\n",
      "Step 7: BSNE_Loss=0.2465,local_loss: 0.06470914911646529,global_loss: 0.18183818459510803\n",
      "Step 8: BSNE_Loss=0.3836,local_loss: -0.001605468670333914,global_loss: 0.3852107524871826\n",
      "Step 9: BSNE_Loss=0.4970,local_loss: 0.03933522011143745,global_loss: 0.4576275050640106\n",
      "Step 10: BSNE_Loss=0.4917,local_loss: 0.11267497382170985,global_loss: 0.37899526953697205\n",
      "Step 11: BSNE_Loss=0.3105,local_loss: 0.1560357867635098,global_loss: 0.15444396436214447\n",
      "Step 12: BSNE_Loss=0.5493,local_loss: 0.06864661315877145,global_loss: 0.48065757751464844\n",
      "Step 13: BSNE_Loss=0.3395,local_loss: 0.07231791681772681,global_loss: 0.2672075033187866\n",
      "Step 14: BSNE_Loss=0.4476,local_loss: 0.1116834486186336,global_loss: 0.3359021544456482\n",
      "Step 15: BSNE_Loss=0.5672,local_loss: 0.0778172132183936,global_loss: 0.4893386960029602\n",
      "Step 16: BSNE_Loss=0.3332,local_loss: 0.012498748275159297,global_loss: 0.3206583559513092\n",
      "Step 17: BSNE_Loss=0.2259,local_loss: 0.09667400012965091,global_loss: 0.1292131543159485\n",
      "Step 18: BSNE_Loss=0.4463,local_loss: 0.034560926541786365,global_loss: 0.4117029011249542\n",
      "Step 19: BSNE_Loss=0.5179,local_loss: 0.009596814393035356,global_loss: 0.5082536935806274\n",
      "Step 20: BSNE_Loss=0.5988,local_loss: 0.05620104227494778,global_loss: 0.542643666267395\n",
      "Step 21: BSNE_Loss=0.3533,local_loss: 0.10086958306056609,global_loss: 0.2523832619190216\n",
      "Step 22: BSNE_Loss=0.4369,local_loss: 0.11453883584303728,global_loss: 0.3223172128200531\n",
      "Step 23: BSNE_Loss=0.2846,local_loss: 0.05113165588861571,global_loss: 0.23345521092414856\n",
      "Step 24: BSNE_Loss=0.5227,local_loss: 0.10638928074957836,global_loss: 0.41631826758384705\n",
      "Step 25: BSNE_Loss=0.2715,local_loss: 0.03919162830324655,global_loss: 0.23229241371154785\n",
      "Step 26: BSNE_Loss=0.4781,local_loss: 0.08636097818677711,global_loss: 0.39169996976852417\n",
      "Step 27: BSNE_Loss=0.3369,local_loss: 0.002613283758521101,global_loss: 0.3342680037021637\n",
      "Step 28: BSNE_Loss=0.4613,local_loss: 0.04809373464172195,global_loss: 0.4131844639778137\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[31m---------------------------------------------------------------------------\u001b[39m",
      "\u001b[31mKeyboardInterrupt\u001b[39m                         Traceback (most recent call last)",
      "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[4]\u001b[39m\u001b[32m, line 155\u001b[39m\n\u001b[32m    153\u001b[39m bu_node_features = []\n\u001b[32m    154\u001b[39m \u001b[38;5;28;01mfor\u001b[39;00m node_idx \u001b[38;5;129;01min\u001b[39;00m bu_nodes:\n\u001b[32m--> \u001b[39m\u001b[32m155\u001b[39m     node_subgraph = \u001b[43mcreate_node_subgraph\u001b[49m\u001b[43m(\u001b[49m\u001b[43mnode_idx\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mfeat_data\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43medge_indexs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdevice\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m    156\u001b[39m     _, node_feature = bsne_model([node_subgraph])\n\u001b[32m    157\u001b[39m     bu_node_features.append(node_feature.squeeze(\u001b[32m0\u001b[39m))\n",
      "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[1]\u001b[39m\u001b[32m, line 50\u001b[39m, in \u001b[36mcreate_node_subgraph\u001b[39m\u001b[34m(node_idx, feat_data, edge_indexs, device)\u001b[39m\n\u001b[32m     48\u001b[39m neighbors = \u001b[38;5;28mset\u001b[39m()\n\u001b[32m     49\u001b[39m \u001b[38;5;28;01mfor\u001b[39;00m rel_idx \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mrange\u001b[39m(\u001b[38;5;28mlen\u001b[39m(edge_indexs)):\n\u001b[32m---> \u001b[39m\u001b[32m50\u001b[39m     edge_index = \u001b[43medge_indexs\u001b[49m\u001b[43m[\u001b[49m\u001b[43mrel_idx\u001b[49m\u001b[43m]\u001b[49m\u001b[43m[\u001b[49m\u001b[32;43m0\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m.\u001b[49m\u001b[43mcpu\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m.numpy()\n\u001b[32m     51\u001b[39m     \u001b[38;5;66;03m# 找出以中心点为起点的边的终点\u001b[39;00m\n\u001b[32m     52\u001b[39m     rel_neighbors = edge_index[\u001b[32m1\u001b[39m][edge_index[\u001b[32m0\u001b[39m] == node_idx].tolist()\n",
      "\u001b[31mKeyboardInterrupt\u001b[39m: "
     ]
    }
   ],
   "source": [
    "# Full test run (new): train repeatedly on a single subgraph; the squared term is NOT included in the loss\n",
    "\n",
    "# Hyperparameter settings\n",
    "args = {\n",
    "    \"dataset\": \"amazon\",\n",
    "#     \"dataset\": \"yelp\",\n",
    "    \"batch_size\": 8,\n",
    "    \"sample_size\": 50,\n",
    "    \"weight_decay\": 0.00005,\n",
    "    \"emb_size\": 32,\n",
    "    \"pretrain_epochs\": 200,\n",
    "    \"pretrain_lr\": 0.001, #0.0001\n",
    "    \"finetune_lr\": 0.0005,\n",
    "    \"num_epochs\": 200,\n",
    "    \"pretrain_patience\": 20,\n",
    "    \"patience\": 30,\n",
    "    \"tsne_weight\": 0.3,\n",
    "    \"weight\": 0.6,\n",
    "    \"layers\": 7,\n",
    "    \"test_size\": 0.6,\n",
    "    \"val_size\": 0.5,\n",
    "    \"layers_tree\": 7,\n",
    "    \"seed\": 76,\n",
    "    \"num_heads\": 2,\n",
    "    \"drop_rate\": 0.5\n",
    "}\n",
    "\n",
    "# Use the GPU when available, otherwise fall back to CPU\n",
    "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "print(device)\n",
    "\n",
    "# TensorBoard writer: one run directory per dataset + timestamp\n",
    "timestamp = time.strftime(\"%Y%m%d-%H%M%S\")\n",
    "writer = SummaryWriter(f'runs/{args[\"dataset\"]}_{timestamp}')\n",
    "\n",
    "print('loading data...')\n",
    "prefix = \"../../data/\"  # root directory of the preprocessed datasets\n",
    "\n",
    "# load_data (defined earlier in the notebook) returns per-relation edge indices,\n",
    "# the node feature matrix, and the node labels\n",
    "edge_indexs, feat_data, labels = load_data(args['dataset'], args['layers_tree'], prefix)\n",
    "\n",
    "# Seed the global RNGs used for the sampling below\n",
    "np.random.seed(args['seed'])\n",
    "rd.seed(args['seed'])\n",
    "\n",
    "# Stratified train/val/test split.\n",
    "# NOTE(review): the split uses a fixed random_state=2 rather than args['seed'] -- confirm intended.\n",
    "if args['dataset'] == 'yelp':\n",
    "    index = list(range(len(labels)))\n",
    "    idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels, stratify=labels,\n",
    "                                                                    test_size=args['test_size'], random_state=2,\n",
    "                                                                    shuffle=True)\n",
    "    idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,\n",
    "                                                          stratify=y_train_val, test_size=args['val_size'],\n",
    "                                                          random_state=2, shuffle=True)\n",
    "    dist_path = os.path.join(prefix, \"YelpChi_shortest_distance.pkl\")\n",
    "elif args['dataset'] == 'amazon':\n",
    "    # NOTE(review): nodes 0..3304 are excluded from every split -- presumably unlabeled; confirm against the dataset\n",
    "    index = list(range(3305, len(labels)))\n",
    "    idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels[3305:],\n",
    "                                                                    stratify=labels[3305:],\n",
    "                                                                    test_size=args['test_size'],\n",
    "                                                                    random_state=2, shuffle=True)\n",
    "    idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,\n",
    "                                                          stratify=y_train_val, test_size=args['val_size'],\n",
    "                                                          random_state=2, shuffle=True)\n",
    "    dist_path = os.path.join(prefix, \"Amazon_shortest_distance.pkl\")\n",
    "\n",
    "# Load the precomputed all-pairs shortest-path distance matrix\n",
    "with open(dist_path, 'rb') as f:\n",
    "    dist_data = pickle.load(f)\n",
    "    dist_matrix = torch.tensor(dist_data['dist_matrix']).to(device)\n",
    "\n",
    "# Prepare the node feature tensor\n",
    "feat_data = torch.tensor(feat_data).float()\n",
    "# Min-max normalize features to [0, 1]\n",
    "scaler = MinMaxScaler()\n",
    "feat_data = torch.tensor(scaler.fit_transform(feat_data)).float().to(device)\n",
    "\n",
    "# Initialize the BSNE transformer model\n",
    "bsne_model = BSNE_Transformer(\n",
    "    in_feat=feat_data.shape[1],\n",
    "    out_feat=2,  # two output classes (matches the [B, 2] logits used during fine-tuning)\n",
    "    relation_nums=len(edge_indexs),\n",
    "    d_model=64,\n",
    "    nhead=args['num_heads'],\n",
    "    num_layers=3,\n",
    "    dim_feedforward=256,\n",
    "    drop_rate=args['drop_rate']\n",
    ").to(device)\n",
    "\n",
    "# Move the edge indices onto the target device\n",
    "for edge_index in edge_indexs:\n",
    "    edge_index[0] = edge_index[0].to(device)\n",
    "    edge_index[1] = [tensor.to(device) for tensor in edge_index[1]]\n",
    "\n",
    "all_local_losses = []      # per-epoch list of per-batch local losses\n",
    "all_global_losses = []     # per-epoch list of per-batch global losses\n",
    "all_bsne_losses = []       # per-epoch total BSNE loss\n",
    "epoch_avg_local = []       # per-epoch mean local loss\n",
    "epoch_avg_global = []      # per-epoch mean global loss\n",
    "    \n",
    "    \n",
    "print(\"\\n=== Starting Pretraining ===\")\n",
    "\n",
    "# Freeze the classifier head during pretraining; only the remaining parameters are optimized\n",
    "bsne_model.classifier.requires_grad_(False)\n",
    "optimizer = torch.optim.AdamW(\n",
    "    filter(lambda p: p.requires_grad, bsne_model.parameters()),\n",
    "    lr=args['pretrain_lr'],\n",
    "    weight_decay=5e-5\n",
    ")\n",
    "pretrain_best_loss = float('inf')\n",
    "pretrain_no_improve = 0\n",
    "pretrain_early_stop = False\n",
    "\n",
    "# One pretraining epoch per center node; training on a center stops once its loss\n",
    "# drops below loss_threshold or max_steps_per_center is reached\n",
    "center_indices = list(range(feat_data.shape[0]))\n",
    "loss_threshold = 0.2\n",
    "sample_size = 50\n",
    "max_steps_per_center = 1000  # guard against an infinite training loop\n",
    "\n",
    "for epoch, center_idx in enumerate(center_indices):\n",
    "    print(f\"\\n=== Pretraining Epoch {epoch} (Center Node: {center_idx}) ===\")\n",
    "    step = 0\n",
    "    \n",
    "    # Build the Bp node set: neighbors sampled with probability proportional to 10^(-distance)\n",
    "    # NOTE(review): Bp sampling uses base 10 here while P below uses exp(-d) -- confirm the mismatch is intended\n",
    "    dist_row = dist_matrix[center_idx].cpu().numpy()\n",
    "    probs = np.power(10.0, -dist_row)\n",
    "    probs[center_idx] = 0  # never sample the center itself for Bp\n",
    "    probs /= probs.sum()\n",
    "    available_nodes = len(dist_row) - 1\n",
    "    actual_sample_size = min(sample_size, available_nodes)\n",
    "    if actual_sample_size > 0:\n",
    "        neighbors = np.random.choice(len(dist_row), size=actual_sample_size, p=probs, replace=False)\n",
    "        bp_nodes = [center_idx] + neighbors.tolist()\n",
    "    else:\n",
    "        bp_nodes = [center_idx]\n",
    "\n",
    "    # Build the Bu node set: neighbors sampled uniformly at random\n",
    "    # NOTE(review): this uniform draw can include center_idx again, duplicating the center in bu_nodes -- verify intended\n",
    "    if actual_sample_size > 0:\n",
    "        neighbors = np.random.choice(len(dist_row), size=actual_sample_size, replace=False)\n",
    "        bu_nodes = [center_idx] + neighbors.tolist()\n",
    "    else:\n",
    "        bu_nodes = [center_idx]\n",
    "        \n",
    "    # Repeatedly train on the same (Bp, Bu) pair until the loss threshold or step cap is hit\n",
    "    while True:\n",
    "        bsne_model.train()\n",
    "        optimizer.zero_grad()\n",
    "        \n",
    "        total_loss = 0.0  # must be reset at every step\n",
    "        eps = 1e-10\n",
    "        \n",
    "        # Encode every node in the Bp set (one subgraph forward pass per node)\n",
    "        bp_node_features = []\n",
    "        for node_idx in bp_nodes:\n",
    "            node_subgraph = create_node_subgraph(node_idx, feat_data, edge_indexs, device)\n",
    "            _, node_feature = bsne_model([node_subgraph])\n",
    "            bp_node_features.append(node_feature.squeeze(0))\n",
    "        bp_features = torch.stack(bp_node_features)\n",
    "        \n",
    "        # Encode every node in the Bu set\n",
    "        bu_node_features = []\n",
    "        for node_idx in bu_nodes:\n",
    "            node_subgraph = create_node_subgraph(node_idx, feat_data, edge_indexs, device)\n",
    "            _, node_feature = bsne_model([node_subgraph])\n",
    "            bu_node_features.append(node_feature.squeeze(0))\n",
    "        bu_features = torch.stack(bu_node_features)\n",
    "        \n",
    "        # Compute the loss\n",
    "        center_feature = bp_features[0]  # the center node is first\n",
    "\n",
    "        # Only the other Bp nodes (center node excluded)\n",
    "        other_bp_indices = bp_nodes[1:]\n",
    "        other_bp_features = bp_features[1:]\n",
    "        \n",
    "        with torch.no_grad():\n",
    "            # Original (graph) distances from the center to the other Bp nodes\n",
    "            orig_dists_bp = dist_matrix[center_idx][other_bp_indices].cpu().numpy()\n",
    "\n",
    "            # Target distribution P over Bp neighbors (normalized exp of negative path distance)\n",
    "            P = np.exp(-orig_dists_bp)\n",
    "            P_sum = P.sum()\n",
    "            P = P / (P_sum + eps)\n",
    "            P = torch.tensor(P, device=device)\n",
    "\n",
    "        # Model distribution Q over Bp neighbors (based on embedding-space distance)\n",
    "        # Feature-space distance from the center to each other Bp node\n",
    "        feat_dists_bp = torch.cdist(center_feature.unsqueeze(0), other_bp_features).squeeze(0)\n",
    "        # NOTE(review): requires_grad_ on this non-leaf tensor looks like a no-op for autograd -- confirm it is needed\n",
    "        feat_dists_bp.requires_grad_(True)\n",
    "\n",
    "        \n",
    "        Q = torch.softmax(-feat_dists_bp, dim=0)\n",
    "        \n",
    "#         log_ratio = torch.log(P + eps) - torch.log(Q + eps)\n",
    "        log_ratio = torch.log(((P + eps)/(Q + eps)))\n",
    "        # NOTE(review): mean of log(P/Q) is not KL divergence (which would weight by P) -- confirm intended\n",
    "        loss_local = log_ratio.mean() \n",
    "\n",
    "        # Global probabilities derived from distances to every node\n",
    "        all_dists = dist_matrix[center_idx].cpu().numpy()\n",
    "        unnorm_probs_global = np.exp(-all_dists)\n",
    "        unnorm_probs_global[center_idx] = 0  # exclude the center itself\n",
    "\n",
    "        # Global normalization constant\n",
    "        Z_global = unnorm_probs_global.sum() + eps\n",
    "\n",
    "        # Total global probability mass carried by the B_p nodes\n",
    "        bp_global_prob_sum = unnorm_probs_global[other_bp_indices].sum() / Z_global\n",
    "\n",
    "        # Compute k_Bp: Bp mass rescaled by N / |Bp| (importance weight for the sampled set)\n",
    "        N = dist_matrix.shape[0]\n",
    "        k_Bp = bp_global_prob_sum * (N / len(other_bp_indices))\n",
    "\n",
    "        bu_features_ = bu_features[1:]  # exclude the center node\n",
    "        feat_dists_bu = torch.cdist(center_feature.unsqueeze(0), bu_features_).squeeze(0)\n",
    "\n",
    "        # Ratio of similarity mass in the uniform set vs. the proximity-biased set\n",
    "        sum_e_bu = torch.exp(-feat_dists_bu).sum()\n",
    "        sum_e_bp = torch.exp(-feat_dists_bp).sum()\n",
    "        global_ratio = k_Bp * (sum_e_bu / (sum_e_bp + eps))\n",
    "        \n",
    "        # Clamp before the log to avoid +/-inf\n",
    "        loss_global = torch.log(global_ratio.clamp(min=eps, max=1e10))\n",
    "\n",
    "        # = 5. Combine the losses =\n",
    "        total_loss += loss_local + loss_global\n",
    "        \n",
    "        total_loss.backward()\n",
    "        \n",
    "        # Apply gradient clipping for stability\n",
    "        torch.nn.utils.clip_grad_norm_(bsne_model.parameters(), max_norm=1.0)\n",
    "        \n",
    "        optimizer.step()\n",
    "        \n",
    "        print(f\"Step {step}: BSNE_Loss={total_loss.item():.4f},local_loss: {loss_local.item()},global_loss: {loss_global.item()}\")\n",
    "        step += 1\n",
    "        \n",
    "        # Stop on this center once the loss is low enough or the step cap is hit\n",
    "        if total_loss.item() < loss_threshold or step >= max_steps_per_center:\n",
    "            print(f\"Center node {center_idx} finished at step {step} with loss {total_loss.item():.4f}\")\n",
    "            break\n",
    "\n",
    "# === Fine-tuning stage ===\n",
    "# Unfreeze the classifier head and train the whole model end-to-end with a\n",
    "# fresh AdamW optimizer; track validation AUC for LR scheduling, early\n",
    "# stopping, and best-checkpoint selection.\n",
    "print(\"\\n=== Starting Fine-tuning ===\")\n",
    "bsne_model.classifier.requires_grad_(True)\n",
    "optimizer = torch.optim.AdamW(\n",
    "    bsne_model.parameters(),\n",
    "    lr=args['finetune_lr'],\n",
    "    weight_decay=5e-5\n",
    ")\n",
    "# Maximise validation AUC; halve the LR after 10 evaluations without improvement.\n",
    "scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(\n",
    "    optimizer, mode='max', factor=0.5, patience=10, verbose=True\n",
    ")\n",
    "\n",
    "best_val_auc = 0.0\n",
    "best_model_state = None  # deep-copied state_dict of the best model seen so far\n",
    "train_pos, train_neg = pos_neg_split(idx_train, y_train)\n",
    "\n",
    "no_improve_epochs = 0\n",
    "early_stop = False  # kept for backward compatibility with downstream cells\n",
    "\n",
    "for epoch in range(args['num_epochs']):\n",
    "    bsne_model.train()\n",
    "\n",
    "    # Sample a batch of center nodes from the combined pos/neg training pool.\n",
    "    batch_centers = rd.sample(train_pos + train_neg, args['batch_size'])\n",
    "    # Build one subgraph per center node. Uses a comprehension instead of a\n",
    "    # loop variable named `subgraph`, which shadowed the\n",
    "    # torch_geometric.utils.subgraph import from the header cell.\n",
    "    subgraph_data = [\n",
    "        create_node_subgraph(xi, feat_data, edge_indexs, device)\n",
    "        for xi in batch_centers\n",
    "    ]\n",
    "\n",
    "    optimizer.zero_grad()\n",
    "\n",
    "    center_logits, _ = bsne_model(subgraph_data)  # [B, 2] log-probabilities\n",
    "\n",
    "    # Labels of the sampled center nodes.\n",
    "    labels_center = torch.tensor([labels[xi] for xi in batch_centers]).to(device).long()\n",
    "\n",
    "    # Classification loss (model emits log-softmax scores, hence nll_loss).\n",
    "    cls_loss = F.nll_loss(center_logits, labels_center)\n",
    "\n",
    "    cls_loss.backward()\n",
    "    optimizer.step()\n",
    "\n",
    "    # NOTE(review): nll_loss is already a batch mean; dividing by batch_size\n",
    "    # again gives a doubly-normalised value. Kept as-is so the logged curve\n",
    "    # stays comparable with earlier runs.\n",
    "    avg_loss = cls_loss.item() / args['batch_size']\n",
    "    writer.add_scalar('FineTune/Train_Loss', avg_loss, epoch)\n",
    "\n",
    "    # Evaluate on the validation split every 5 epochs.\n",
    "    if epoch % 5 == 0:\n",
    "        val_auc, val_ap, val_f1, val_g_mean = test(idx_val, y_val, bsne_model, feat_data, edge_indexs, device)\n",
    "\n",
    "        writer.add_scalar('Validation/AUC', val_auc, epoch)\n",
    "        writer.add_scalar('Validation/F1', val_f1, epoch)\n",
    "        writer.add_scalar('Validation/GMean', val_g_mean, epoch)\n",
    "\n",
    "        print(f'Epoch: {epoch:03d} | Loss: {avg_loss:.4f} | Val AUC: {val_auc:.4f} | Val F1: {val_f1:.4f}')\n",
    "\n",
    "        scheduler.step(val_auc)\n",
    "\n",
    "        if val_auc > best_val_auc:\n",
    "            best_val_auc = val_auc\n",
    "            no_improve_epochs = 0\n",
    "            best_model_state = copy.deepcopy(bsne_model.state_dict())\n",
    "        else:\n",
    "            no_improve_epochs += 1\n",
    "\n",
    "        if no_improve_epochs >= args['patience']:\n",
    "            print(f\"Early stopping at epoch {epoch}\")\n",
    "            early_stop = True\n",
    "            break  # stop immediately instead of spinning one extra epoch\n",
    "\n",
    "# Restore the best checkpoint. Guard against best_model_state being None\n",
    "# (num_epochs == 0, or validation AUC never exceeded 0.0), which previously\n",
    "# crashed load_state_dict with a TypeError.\n",
    "if best_model_state is not None:\n",
    "    bsne_model.load_state_dict(best_model_state)\n",
    "\n",
    "# Final evaluation on the held-out test split.\n",
    "test_auc, test_ap, test_f1, test_g_mean = test(idx_test, y_test, bsne_model, feat_data, edge_indexs, device)\n",
    "print(f'\\n=== Final Test Results ===')\n",
    "print(f'Test AUC: {test_auc:.4f} | Test AP: {test_ap:.4f} | Test F1: {test_f1:.4f} | G-mean: {test_g_mean:.4f}')\n",
    "writer.close()\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "92889355",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "fba1fc2d",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "cuda\n",
      "loading data...\n",
      "\n",
      "=== Starting Pretraining ===\n",
      "\n",
      "=== Pretraining Epoch 0 (Center Node: 268) ===\n",
      "Q: tensor([0.0567, 0.0325, 0.0172, 0.0087, 0.0100, 0.0257, 0.0048, 0.0265, 0.0044,\n",
      "        0.0421, 0.0046, 0.0084, 0.0290, 0.0204, 0.0084, 0.0375, 0.0116, 0.0259,\n",
      "        0.0067, 0.0181, 0.0109, 0.0125, 0.0315, 0.0247, 0.0337, 0.0034, 0.0086,\n",
      "        0.0250, 0.0131, 0.0465, 0.0318, 0.0124, 0.0526, 0.0124, 0.0329, 0.0032,\n",
      "        0.0273, 0.0091, 0.0300, 0.0081, 0.0050, 0.0187, 0.0124, 0.0242, 0.0295,\n",
      "        0.0185, 0.0043, 0.0095, 0.0151, 0.0337], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 0: BSNE_Loss=0.1575,local_loss: 0.15753227778927745,global_loss: 0.05033081769943237\n",
      "Q: tensor([0.0225, 0.0447, 0.0138, 0.0408, 0.0385, 0.0172, 0.0291, 0.0245, 0.0280,\n",
      "        0.0102, 0.0071, 0.0227, 0.0093, 0.0253, 0.0103, 0.0143, 0.0061, 0.0072,\n",
      "        0.0131, 0.0234, 0.0117, 0.0127, 0.0399, 0.0334, 0.0131, 0.0147, 0.0089,\n",
      "        0.0064, 0.0108, 0.0125, 0.0365, 0.0101, 0.0046, 0.0158, 0.0119, 0.0089,\n",
      "        0.0248, 0.0165, 0.0043, 0.0543, 0.0199, 0.0314, 0.0173, 0.0155, 0.0293,\n",
      "        0.0477, 0.0183, 0.0218, 0.0063, 0.0327], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 1: BSNE_Loss=0.0940,local_loss: 0.09404606058211107,global_loss: 0.24183236062526703\n",
      "Q: tensor([0.0989, 0.0119, 0.0131, 0.0079, 0.0149, 0.0278, 0.0025, 0.0058, 0.0221,\n",
      "        0.0168, 0.0092, 0.0100, 0.0421, 0.0208, 0.0666, 0.0150, 0.0106, 0.0067,\n",
      "        0.0250, 0.0271, 0.0078, 0.0065, 0.0080, 0.0116, 0.0306, 0.0092, 0.0066,\n",
      "        0.0156, 0.0043, 0.0277, 0.0187, 0.0204, 0.0180, 0.0101, 0.0178, 0.0190,\n",
      "        0.0430, 0.0557, 0.0313, 0.0084, 0.0123, 0.0257, 0.0035, 0.0589, 0.0180,\n",
      "        0.0046, 0.0129, 0.0137, 0.0034, 0.0220], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 2: BSNE_Loss=0.2182,local_loss: 0.21821381188569866,global_loss: 0.05846564099192619\n",
      "Q: tensor([0.0080, 0.0086, 0.0126, 0.0113, 0.0228, 0.0552, 0.0061, 0.0126, 0.0123,\n",
      "        0.0173, 0.0055, 0.0040, 0.0142, 0.0071, 0.0334, 0.0088, 0.0119, 0.0107,\n",
      "        0.0090, 0.0122, 0.0893, 0.0038, 0.0291, 0.0273, 0.0055, 0.0196, 0.0046,\n",
      "        0.0142, 0.0276, 0.0132, 0.0293, 0.0158, 0.0120, 0.0868, 0.0582, 0.0105,\n",
      "        0.0056, 0.0070, 0.0093, 0.0403, 0.0128, 0.0072, 0.0031, 0.0216, 0.0033,\n",
      "        0.0315, 0.0113, 0.0523, 0.0558, 0.0085], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 3: BSNE_Loss=0.2754,local_loss: 0.27535998785970345,global_loss: 0.2967510521411896\n",
      "Q: tensor([0.0109, 0.0210, 0.0181, 0.0163, 0.0039, 0.0042, 0.0092, 0.0360, 0.0202,\n",
      "        0.0079, 0.0239, 0.0167, 0.0156, 0.0234, 0.0455, 0.0100, 0.0022, 0.0317,\n",
      "        0.0045, 0.0147, 0.0161, 0.0251, 0.0221, 0.0077, 0.0090, 0.0219, 0.0148,\n",
      "        0.0035, 0.0582, 0.0151, 0.0085, 0.0244, 0.0112, 0.0068, 0.0227, 0.0289,\n",
      "        0.0256, 0.0578, 0.0283, 0.0246, 0.0209, 0.0207, 0.0141, 0.0058, 0.0132,\n",
      "        0.0391, 0.0212, 0.0421, 0.0250, 0.0293], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 4: BSNE_Loss=0.1301,local_loss: 0.13006672081616288,global_loss: 0.3330940008163452\n",
      "Q: tensor([0.0185, 0.0096, 0.0194, 0.0363, 0.0047, 0.0086, 0.0365, 0.0067, 0.0175,\n",
      "        0.0062, 0.0207, 0.0358, 0.0077, 0.1964, 0.0044, 0.0208, 0.0076, 0.0273,\n",
      "        0.0067, 0.0096, 0.0808, 0.0134, 0.0060, 0.0096, 0.0222, 0.0021, 0.0077,\n",
      "        0.0050, 0.0090, 0.0252, 0.0303, 0.0179, 0.0144, 0.0115, 0.0048, 0.0101,\n",
      "        0.0119, 0.0182, 0.0182, 0.0104, 0.0150, 0.0601, 0.0208, 0.0075, 0.0053,\n",
      "        0.0084, 0.0141, 0.0163, 0.0173, 0.0054], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 5: BSNE_Loss=0.3191,local_loss: 0.31906068691363515,global_loss: 0.07442721724510193\n",
      "Q: tensor([0.0227, 0.0070, 0.0133, 0.0156, 0.0109, 0.0162, 0.0337, 0.0054, 0.0108,\n",
      "        0.0134, 0.0150, 0.0095, 0.0448, 0.0944, 0.0248, 0.0163, 0.0073, 0.0294,\n",
      "        0.0370, 0.0102, 0.0137, 0.0091, 0.0151, 0.0095, 0.0154, 0.0133, 0.0087,\n",
      "        0.0152, 0.0046, 0.0137, 0.0137, 0.0372, 0.0239, 0.0063, 0.0168, 0.0064,\n",
      "        0.0475, 0.0060, 0.0128, 0.0099, 0.0090, 0.0213, 0.0121, 0.0207, 0.0677,\n",
      "        0.0550, 0.0163, 0.0163, 0.0284, 0.0168], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 10: BSNE_Loss=0.1468,local_loss: 0.14675627208792158,global_loss: 0.38349103927612305\n",
      "Q: tensor([0.0337, 0.0061, 0.0143, 0.0114, 0.0285, 0.0248, 0.0061, 0.0069, 0.0172,\n",
      "        0.0093, 0.0315, 0.0149, 0.0234, 0.0241, 0.0067, 0.0126, 0.0056, 0.0133,\n",
      "        0.0296, 0.0385, 0.0362, 0.0298, 0.0411, 0.0345, 0.0055, 0.0063, 0.0568,\n",
      "        0.0187, 0.0141, 0.0367, 0.0044, 0.0934, 0.0167, 0.0219, 0.0066, 0.0043,\n",
      "        0.0149, 0.0123, 0.0086, 0.0077, 0.0115, 0.0078, 0.0254, 0.0070, 0.0303,\n",
      "        0.0157, 0.0165, 0.0037, 0.0362, 0.0166], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 11: BSNE_Loss=0.1786,local_loss: 0.17857310947694907,global_loss: 0.1886475533246994\n",
      "Q: tensor([0.0273, 0.0121, 0.0189, 0.0078, 0.0056, 0.0952, 0.0640, 0.0051, 0.0355,\n",
      "        0.0083, 0.0182, 0.0093, 0.0235, 0.0262, 0.0070, 0.0176, 0.0109, 0.0576,\n",
      "        0.0066, 0.0317, 0.0160, 0.0157, 0.0250, 0.0097, 0.0038, 0.0046, 0.0088,\n",
      "        0.0027, 0.0139, 0.0138, 0.0166, 0.0505, 0.0152, 0.0168, 0.0048, 0.0111,\n",
      "        0.0135, 0.0238, 0.0040, 0.0113, 0.0046, 0.0089, 0.0438, 0.0070, 0.0284,\n",
      "        0.0025, 0.0519, 0.0425, 0.0171, 0.0233], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 12: BSNE_Loss=0.2552,local_loss: 0.25515522970774573,global_loss: 0.649613082408905\n",
      "Q: tensor([0.0271, 0.0068, 0.0422, 0.0091, 0.0312, 0.0463, 0.0205, 0.0284, 0.0239,\n",
      "        0.0143, 0.0475, 0.0052, 0.0188, 0.0101, 0.0140, 0.0100, 0.0136, 0.0156,\n",
      "        0.0125, 0.0432, 0.0046, 0.0130, 0.0174, 0.0256, 0.0078, 0.0033, 0.0186,\n",
      "        0.0359, 0.0215, 0.0272, 0.0083, 0.0284, 0.0046, 0.0014, 0.0210, 0.0178,\n",
      "        0.0436, 0.0083, 0.0125, 0.0101, 0.0094, 0.0275, 0.0143, 0.0106, 0.0172,\n",
      "        0.0557, 0.0311, 0.0166, 0.0089, 0.0373], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 13: BSNE_Loss=0.1446,local_loss: 0.14459476814910574,global_loss: 0.2253836989402771\n",
      "Q: tensor([0.0158, 0.0286, 0.0116, 0.0075, 0.0361, 0.0070, 0.0064, 0.0161, 0.0343,\n",
      "        0.0209, 0.0230, 0.0231, 0.0175, 0.0359, 0.0045, 0.0436, 0.0092, 0.0086,\n",
      "        0.0352, 0.0143, 0.0154, 0.0182, 0.0139, 0.0530, 0.0061, 0.0223, 0.0120,\n",
      "        0.0154, 0.0051, 0.0148, 0.0117, 0.0035, 0.0116, 0.0049, 0.0047, 0.0044,\n",
      "        0.0035, 0.0054, 0.0087, 0.0265, 0.1620, 0.0113, 0.0118, 0.0299, 0.0223,\n",
      "        0.0486, 0.0182, 0.0039, 0.0232, 0.0086], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 14: BSNE_Loss=0.2665,local_loss: 0.2665060338595931,global_loss: 0.36551541090011597\n",
      "Q: tensor([0.0178, 0.0046, 0.0074, 0.0184, 0.0153, 0.0039, 0.0062, 0.0292, 0.0260,\n",
      "        0.0071, 0.0253, 0.0256, 0.0216, 0.1538, 0.0114, 0.0042, 0.0126, 0.0053,\n",
      "        0.0082, 0.0187, 0.0119, 0.0253, 0.0267, 0.0157, 0.0213, 0.0161, 0.0105,\n",
      "        0.0091, 0.0030, 0.0213, 0.0448, 0.0551, 0.0271, 0.0129, 0.0172, 0.0084,\n",
      "        0.0279, 0.0379, 0.0124, 0.0119, 0.0221, 0.0173, 0.0116, 0.0113, 0.0317,\n",
      "        0.0134, 0.0074, 0.0068, 0.0195, 0.0198], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 15: BSNE_Loss=0.1970,local_loss: 0.1969697301129898,global_loss: 0.2926000654697418\n",
      "Q: tensor([0.0173, 0.0267, 0.0261, 0.0100, 0.0067, 0.0213, 0.0051, 0.0145, 0.0106,\n",
      "        0.0315, 0.0371, 0.0153, 0.0151, 0.0142, 0.0059, 0.0639, 0.0239, 0.0245,\n",
      "        0.0041, 0.0206, 0.0429, 0.0138, 0.0240, 0.0346, 0.0543, 0.0035, 0.0101,\n",
      "        0.0158, 0.0236, 0.0050, 0.0153, 0.0228, 0.0268, 0.0201, 0.0193, 0.0412,\n",
      "        0.0387, 0.0076, 0.0113, 0.0169, 0.0396, 0.0150, 0.0087, 0.0104, 0.0089,\n",
      "        0.0275, 0.0117, 0.0046, 0.0126, 0.0191], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 16: BSNE_Loss=0.1195,local_loss: 0.11954117951190635,global_loss: 0.2804037928581238\n",
      "Q: tensor([0.0097, 0.0871, 0.0140, 0.0312, 0.0063, 0.0224, 0.0133, 0.0129, 0.0187,\n",
      "        0.0108, 0.0120, 0.0353, 0.0371, 0.0418, 0.0119, 0.0099, 0.0133, 0.0174,\n",
      "        0.0162, 0.0069, 0.0319, 0.0106, 0.0245, 0.0041, 0.0144, 0.0191, 0.0516,\n",
      "        0.0091, 0.0205, 0.0134, 0.0102, 0.0271, 0.0212, 0.0139, 0.0222, 0.0075,\n",
      "        0.0064, 0.0528, 0.0087, 0.0397, 0.0368, 0.0177, 0.0036, 0.0190, 0.0109,\n",
      "        0.0090, 0.0142, 0.0177, 0.0286, 0.0050], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 17: BSNE_Loss=0.1354,local_loss: 0.13540850522866352,global_loss: 0.12380680441856384\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Q: tensor([0.0042, 0.0115, 0.0112, 0.0108, 0.0234, 0.0108, 0.0227, 0.0062, 0.0045,\n",
      "        0.0079, 0.0179, 0.0517, 0.0139, 0.0328, 0.0577, 0.0440, 0.0172, 0.0779,\n",
      "        0.0206, 0.0183, 0.0337, 0.0148, 0.0264, 0.0188, 0.0113, 0.0038, 0.0411,\n",
      "        0.0147, 0.0134, 0.0084, 0.0065, 0.0058, 0.0326, 0.0118, 0.0219, 0.0045,\n",
      "        0.0199, 0.0207, 0.0137, 0.0210, 0.0171, 0.0303, 0.0056, 0.0091, 0.0280,\n",
      "        0.0333, 0.0244, 0.0071, 0.0295, 0.0055], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 18: BSNE_Loss=0.1584,local_loss: 0.15843349031594883,global_loss: 0.1358748972415924\n",
      "Q: tensor([0.0153, 0.0084, 0.0315, 0.0100, 0.0258, 0.0189, 0.0177, 0.0148, 0.0178,\n",
      "        0.0135, 0.0378, 0.0406, 0.0253, 0.0158, 0.0257, 0.0398, 0.0217, 0.0077,\n",
      "        0.0321, 0.0059, 0.0071, 0.0071, 0.0198, 0.0375, 0.0036, 0.0128, 0.0114,\n",
      "        0.0215, 0.0313, 0.0107, 0.0231, 0.0430, 0.0129, 0.0082, 0.0304, 0.0188,\n",
      "        0.0514, 0.0174, 0.0114, 0.0170, 0.0295, 0.0159, 0.0185, 0.0071, 0.0103,\n",
      "        0.0082, 0.0143, 0.0213, 0.0283, 0.0240], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 19: BSNE_Loss=0.0643,local_loss: 0.06425331833433692,global_loss: 0.36176183819770813\n",
      "Q: tensor([0.0263, 0.0453, 0.0147, 0.0027, 0.0091, 0.0136, 0.0137, 0.0223, 0.0068,\n",
      "        0.0149, 0.0172, 0.0202, 0.0861, 0.0091, 0.0190, 0.0100, 0.0088, 0.0173,\n",
      "        0.0085, 0.0160, 0.0269, 0.0258, 0.0089, 0.0377, 0.0070, 0.0274, 0.0056,\n",
      "        0.0078, 0.0090, 0.0394, 0.0096, 0.0064, 0.0254, 0.0551, 0.0120, 0.0112,\n",
      "        0.0225, 0.0038, 0.0666, 0.0265, 0.0334, 0.0080, 0.0143, 0.0420, 0.0039,\n",
      "        0.0182, 0.0137, 0.0189, 0.0278, 0.0037], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 20: BSNE_Loss=0.1902,local_loss: 0.19020269597556372,global_loss: 0.2897888123989105\n",
      "Q: tensor([0.0322, 0.0234, 0.0067, 0.0111, 0.0200, 0.0389, 0.0197, 0.0231, 0.0097,\n",
      "        0.0092, 0.0159, 0.0375, 0.0044, 0.0307, 0.0302, 0.0074, 0.0241, 0.0290,\n",
      "        0.0191, 0.0086, 0.0176, 0.0189, 0.0053, 0.0126, 0.0078, 0.0102, 0.0099,\n",
      "        0.0095, 0.0139, 0.0193, 0.0181, 0.0104, 0.0200, 0.0187, 0.0076, 0.0209,\n",
      "        0.0329, 0.0438, 0.0243, 0.0141, 0.0163, 0.0156, 0.0119, 0.0190, 0.0329,\n",
      "        0.0145, 0.0441, 0.0919, 0.0083, 0.0085], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 21: BSNE_Loss=0.0977,local_loss: 0.09773289243776281,global_loss: 0.37749141454696655\n",
      "Q: tensor([0.0520, 0.0072, 0.0085, 0.0089, 0.0141, 0.0043, 0.0201, 0.0053, 0.0558,\n",
      "        0.0922, 0.0081, 0.0088, 0.0251, 0.0043, 0.0066, 0.0230, 0.0200, 0.0100,\n",
      "        0.0880, 0.0200, 0.0358, 0.0140, 0.0055, 0.0108, 0.0173, 0.0046, 0.0273,\n",
      "        0.0319, 0.0567, 0.0015, 0.0084, 0.0058, 0.0056, 0.0307, 0.0074, 0.0093,\n",
      "        0.0057, 0.0297, 0.0106, 0.0306, 0.0297, 0.0054, 0.0130, 0.0165, 0.0142,\n",
      "        0.0186, 0.0095, 0.0257, 0.0208, 0.0152], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 22: BSNE_Loss=0.2678,local_loss: 0.26775271663444045,global_loss: 0.39213407039642334\n",
      "Q: tensor([0.0289, 0.0045, 0.0054, 0.0078, 0.0087, 0.0099, 0.0666, 0.0055, 0.0256,\n",
      "        0.0063, 0.0908, 0.0073, 0.0140, 0.0192, 0.0090, 0.0427, 0.0178, 0.0188,\n",
      "        0.0068, 0.0089, 0.0208, 0.0091, 0.0130, 0.0270, 0.0042, 0.0094, 0.0070,\n",
      "        0.0233, 0.0089, 0.0038, 0.0047, 0.1038, 0.0076, 0.0752, 0.0033, 0.0297,\n",
      "        0.0205, 0.0092, 0.0218, 0.0079, 0.0119, 0.0207, 0.0125, 0.0199, 0.0129,\n",
      "        0.0235, 0.0204, 0.0073, 0.0520, 0.0040], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 23: BSNE_Loss=0.2997,local_loss: 0.29972774641136996,global_loss: 0.10857682675123215\n",
      "Q: tensor([0.0349, 0.0137, 0.0244, 0.0446, 0.0056, 0.0197, 0.0147, 0.0131, 0.0174,\n",
      "        0.0151, 0.0165, 0.0111, 0.0033, 0.0276, 0.0128, 0.0119, 0.0875, 0.0098,\n",
      "        0.0043, 0.0153, 0.0232, 0.0142, 0.0326, 0.0064, 0.0438, 0.0771, 0.0073,\n",
      "        0.0093, 0.0110, 0.0179, 0.0053, 0.0140, 0.0134, 0.0060, 0.0107, 0.0079,\n",
      "        0.0507, 0.0552, 0.0135, 0.0136, 0.0113, 0.0385, 0.0076, 0.0083, 0.0556,\n",
      "        0.0052, 0.0109, 0.0056, 0.0133, 0.0071], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 24: BSNE_Loss=0.2204,local_loss: 0.2203617679304864,global_loss: 0.4019179940223694\n",
      "Q: tensor([0.0314, 0.0321, 0.0191, 0.0250, 0.0418, 0.0205, 0.0296, 0.0090, 0.0136,\n",
      "        0.0174, 0.0156, 0.0270, 0.0062, 0.0202, 0.0096, 0.0066, 0.0786, 0.0219,\n",
      "        0.0170, 0.0205, 0.0056, 0.0343, 0.0110, 0.0124, 0.0230, 0.0208, 0.0153,\n",
      "        0.0313, 0.0100, 0.0214, 0.0117, 0.0259, 0.0122, 0.0138, 0.0219, 0.0241,\n",
      "        0.0176, 0.0121, 0.0213, 0.0117, 0.0353, 0.0214, 0.0205, 0.0099, 0.0073,\n",
      "        0.0133, 0.0113, 0.0133, 0.0236, 0.0243], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 25: BSNE_Loss=0.0414,local_loss: 0.0413993067540658,global_loss: 0.48702603578567505\n",
      "Center node 268 finished at step 26 with loss 0.0414\n",
      "\n",
      "=== Pretraining Epoch 1 (Center Node: 7572) ===\n",
      "Q: tensor([0.0258, 0.0204, 0.0129, 0.0175, 0.0086, 0.0233, 0.0267, 0.0213, 0.0121,\n",
      "        0.0105, 0.0303, 0.0289, 0.0080, 0.0303, 0.0102, 0.0181, 0.0042, 0.0340,\n",
      "        0.0316, 0.0329, 0.0154, 0.0380, 0.0195, 0.0040, 0.0067, 0.0262, 0.0120,\n",
      "        0.0170, 0.0067, 0.0038, 0.0510, 0.0168, 0.0103, 0.0130, 0.0138, 0.0158,\n",
      "        0.0117, 0.0057, 0.0262, 0.0142, 0.0075, 0.0050, 0.0611, 0.0250, 0.1049,\n",
      "        0.0099, 0.0032, 0.0164, 0.0110, 0.0208], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 0: BSNE_Loss=0.1443,local_loss: 0.14432008936625276,global_loss: 0.36543989181518555\n",
      "Q: tensor([0.0149, 0.0280, 0.0425, 0.0161, 0.0072, 0.0033, 0.0070, 0.0113, 0.0185,\n",
      "        0.0409, 0.0093, 0.0193, 0.0160, 0.0121, 0.0209, 0.0111, 0.0066, 0.0106,\n",
      "        0.0193, 0.0123, 0.0321, 0.0402, 0.0310, 0.0123, 0.0144, 0.0370, 0.0076,\n",
      "        0.0147, 0.0158, 0.0381, 0.0086, 0.0532, 0.0033, 0.0062, 0.0168, 0.0341,\n",
      "        0.0051, 0.0347, 0.0295, 0.0142, 0.0285, 0.0167, 0.0440, 0.0185, 0.0144,\n",
      "        0.0118, 0.0366, 0.0221, 0.0177, 0.0135], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 1: BSNE_Loss=0.0788,local_loss: 0.0787959245612493,global_loss: 0.5752469897270203\n",
      "Q: tensor([0.0164, 0.0382, 0.0089, 0.0081, 0.0175, 0.0079, 0.0182, 0.0147, 0.0168,\n",
      "        0.0357, 0.0468, 0.0295, 0.0031, 0.0409, 0.0102, 0.0184, 0.0060, 0.0154,\n",
      "        0.0099, 0.0164, 0.0282, 0.0139, 0.0066, 0.0230, 0.0228, 0.0331, 0.0086,\n",
      "        0.0037, 0.0029, 0.0782, 0.0174, 0.0038, 0.0065, 0.0067, 0.0046, 0.0312,\n",
      "        0.0237, 0.0286, 0.0214, 0.0113, 0.0279, 0.0155, 0.0326, 0.0134, 0.0450,\n",
      "        0.0255, 0.0091, 0.0064, 0.0534, 0.0159], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 2: BSNE_Loss=0.1537,local_loss: 0.1536584273484946,global_loss: 0.576276421546936\n",
      "Q: tensor([0.0121, 0.0329, 0.0238, 0.0141, 0.0247, 0.0041, 0.0078, 0.0138, 0.0439,\n",
      "        0.0294, 0.0164, 0.0236, 0.0053, 0.0045, 0.0142, 0.0118, 0.0170, 0.0361,\n",
      "        0.0110, 0.0535, 0.0237, 0.0183, 0.0221, 0.0102, 0.0136, 0.0226, 0.0462,\n",
      "        0.0150, 0.0228, 0.0395, 0.0194, 0.0272, 0.0233, 0.0289, 0.0120, 0.0065,\n",
      "        0.0403, 0.0059, 0.0546, 0.0127, 0.0122, 0.0141, 0.0342, 0.0099, 0.0274,\n",
      "        0.0043, 0.0067, 0.0112, 0.0127, 0.0025], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 3: BSNE_Loss=0.1029,local_loss: 0.10286659478598206,global_loss: 0.4491479992866516\n",
      "Q: tensor([0.0040, 0.0117, 0.0125, 0.0196, 0.0006, 0.0034, 0.0693, 0.0146, 0.0032,\n",
      "        0.0327, 0.0095, 0.0423, 0.0095, 0.0465, 0.0082, 0.0281, 0.0074, 0.0316,\n",
      "        0.0042, 0.0251, 0.0087, 0.0443, 0.0043, 0.0484, 0.0239, 0.0356, 0.0082,\n",
      "        0.0085, 0.0053, 0.0118, 0.0134, 0.0076, 0.0113, 0.0887, 0.0152, 0.0068,\n",
      "        0.0212, 0.0037, 0.0294, 0.0056, 0.0266, 0.0237, 0.0180, 0.0087, 0.0426,\n",
      "        0.0354, 0.0057, 0.0068, 0.0362, 0.0104], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 4: BSNE_Loss=0.2782,local_loss: 0.2782365703107452,global_loss: 0.23236055672168732\n",
      "Q: tensor([0.0113, 0.0229, 0.0068, 0.0118, 0.0119, 0.0273, 0.0068, 0.0170, 0.0193,\n",
      "        0.0345, 0.0359, 0.0397, 0.0240, 0.0112, 0.0056, 0.0299, 0.0091, 0.0096,\n",
      "        0.0074, 0.0283, 0.0234, 0.0247, 0.0427, 0.0045, 0.0062, 0.0498, 0.0099,\n",
      "        0.0142, 0.0097, 0.0383, 0.0052, 0.0084, 0.0126, 0.0149, 0.0055, 0.0187,\n",
      "        0.0084, 0.0324, 0.0155, 0.0018, 0.0080, 0.0563, 0.0128, 0.0056, 0.0095,\n",
      "        0.0028, 0.0065, 0.1236, 0.0459, 0.0116], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 5: BSNE_Loss=0.2287,local_loss: 0.2287195835235321,global_loss: 0.0967910885810852\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Q: tensor([0.0108, 0.0051, 0.0036, 0.0056, 0.0064, 0.0502, 0.0081, 0.0038, 0.0164,\n",
      "        0.0091, 0.0061, 0.0317, 0.0058, 0.0262, 0.0079, 0.0041, 0.0124, 0.0169,\n",
      "        0.0234, 0.0085, 0.0174, 0.0063, 0.0387, 0.0053, 0.0299, 0.0530, 0.0063,\n",
      "        0.0161, 0.0197, 0.0176, 0.0092, 0.0063, 0.0567, 0.0046, 0.0273, 0.0095,\n",
      "        0.0044, 0.0078, 0.0246, 0.0355, 0.0312, 0.0440, 0.0480, 0.0035, 0.1152,\n",
      "        0.0252, 0.0078, 0.0065, 0.0113, 0.0490], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 6: BSNE_Loss=0.2824,local_loss: 0.28236864919637983,global_loss: 0.5476335883140564\n",
      "Q: tensor([0.0154, 0.0310, 0.0351, 0.1603, 0.0092, 0.0057, 0.0216, 0.0182, 0.0256,\n",
      "        0.0152, 0.0067, 0.0366, 0.0164, 0.0039, 0.0199, 0.0130, 0.0146, 0.0357,\n",
      "        0.0084, 0.0085, 0.0257, 0.0139, 0.0241, 0.0206, 0.0041, 0.0084, 0.0062,\n",
      "        0.0090, 0.0444, 0.0072, 0.0157, 0.0052, 0.0109, 0.0263, 0.0151, 0.0064,\n",
      "        0.0145, 0.0226, 0.0094, 0.0279, 0.0043, 0.0181, 0.0068, 0.0105, 0.0224,\n",
      "        0.0200, 0.0092, 0.0112, 0.0613, 0.0178], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 7: BSNE_Loss=0.1893,local_loss: 0.1893129085588164,global_loss: 0.6088206171989441\n",
      "Q: tensor([0.0032, 0.0228, 0.0119, 0.0249, 0.0065, 0.0198, 0.0312, 0.0178, 0.0076,\n",
      "        0.0170, 0.0178, 0.0070, 0.0225, 0.0757, 0.0364, 0.0207, 0.0019, 0.0074,\n",
      "        0.0572, 0.0233, 0.0073, 0.0127, 0.0473, 0.0128, 0.0301, 0.0174, 0.0221,\n",
      "        0.0062, 0.0062, 0.0251, 0.0098, 0.0045, 0.0123, 0.0147, 0.0099, 0.0141,\n",
      "        0.0056, 0.0057, 0.0321, 0.0051, 0.0065, 0.0394, 0.0213, 0.0152, 0.0145,\n",
      "        0.0487, 0.0059, 0.0765, 0.0187, 0.0198], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 8: BSNE_Loss=0.1879,local_loss: 0.18785006736511542,global_loss: 0.3177746534347534\n",
      "Q: tensor([0.0089, 0.0259, 0.0281, 0.0156, 0.0119, 0.0113, 0.0180, 0.0212, 0.0396,\n",
      "        0.0148, 0.0035, 0.0295, 0.0048, 0.0237, 0.0123, 0.0171, 0.0177, 0.0034,\n",
      "        0.0287, 0.0188, 0.0109, 0.0335, 0.0190, 0.0117, 0.0104, 0.0085, 0.0203,\n",
      "        0.0124, 0.0139, 0.0177, 0.0098, 0.0366, 0.0134, 0.0389, 0.0122, 0.0062,\n",
      "        0.0196, 0.0163, 0.0284, 0.0537, 0.0094, 0.0037, 0.0111, 0.0061, 0.0125,\n",
      "        0.0885, 0.0299, 0.0443, 0.0240, 0.0222], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 9: BSNE_Loss=0.1032,local_loss: 0.10324255189583964,global_loss: 0.4873330295085907\n",
      "Q: tensor([0.0152, 0.0103, 0.0190, 0.0283, 0.0044, 0.0136, 0.0639, 0.0036, 0.0169,\n",
      "        0.0052, 0.0368, 0.0093, 0.0113, 0.0200, 0.0251, 0.0265, 0.0289, 0.0271,\n",
      "        0.0087, 0.0284, 0.0315, 0.0084, 0.0132, 0.0037, 0.0127, 0.0119, 0.0169,\n",
      "        0.0180, 0.0123, 0.0493, 0.0271, 0.0688, 0.0378, 0.0097, 0.0112, 0.0313,\n",
      "        0.0166, 0.0158, 0.0258, 0.0177, 0.0159, 0.0086, 0.0377, 0.0034, 0.0221,\n",
      "        0.0071, 0.0063, 0.0285, 0.0095, 0.0185], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 10: BSNE_Loss=0.1111,local_loss: 0.111111455739313,global_loss: 0.2774850130081177\n",
      "Q: tensor([0.0131, 0.0219, 0.0443, 0.0079, 0.0141, 0.0104, 0.0389, 0.0138, 0.0382,\n",
      "        0.0154, 0.0051, 0.0249, 0.0066, 0.0116, 0.0258, 0.0161, 0.0133, 0.0067,\n",
      "        0.0284, 0.0135, 0.0172, 0.0125, 0.0096, 0.0076, 0.0664, 0.0410, 0.0120,\n",
      "        0.0063, 0.0159, 0.0573, 0.0189, 0.0160, 0.0724, 0.0039, 0.0119, 0.0137,\n",
      "        0.0128, 0.0240, 0.0062, 0.0138, 0.0106, 0.0127, 0.0295, 0.0066, 0.0367,\n",
      "        0.0287, 0.0031, 0.0263, 0.0252, 0.0111], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 11: BSNE_Loss=0.1270,local_loss: 0.1270193757940363,global_loss: 0.48136043548583984\n",
      "Q: tensor([0.0066, 0.0129, 0.0171, 0.0160, 0.0163, 0.0077, 0.0156, 0.0160, 0.0129,\n",
      "        0.0073, 0.0058, 0.0039, 0.0930, 0.0073, 0.0179, 0.0338, 0.0288, 0.0873,\n",
      "        0.0048, 0.0143, 0.0089, 0.0161, 0.0163, 0.0320, 0.0094, 0.0244, 0.0646,\n",
      "        0.0228, 0.0110, 0.0147, 0.0150, 0.0148, 0.0140, 0.0102, 0.0197, 0.0381,\n",
      "        0.0225, 0.0413, 0.0189, 0.0066, 0.0080, 0.0069, 0.0208, 0.0211, 0.0362,\n",
      "        0.0094, 0.0073, 0.0067, 0.0147, 0.0221], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 12: BSNE_Loss=0.1461,local_loss: 0.14610560716697757,global_loss: 0.1213497668504715\n",
      "Q: tensor([0.0127, 0.0027, 0.0104, 0.0149, 0.0250, 0.0169, 0.0086, 0.0189, 0.0121,\n",
      "        0.0074, 0.0173, 0.0121, 0.0289, 0.0050, 0.0234, 0.0081, 0.0116, 0.0026,\n",
      "        0.0582, 0.0093, 0.0152, 0.0445, 0.0102, 0.0036, 0.0152, 0.0239, 0.0075,\n",
      "        0.0144, 0.0363, 0.0232, 0.1003, 0.0073, 0.0157, 0.0098, 0.0187, 0.0315,\n",
      "        0.0195, 0.0057, 0.0173, 0.0106, 0.0321, 0.0338, 0.0080, 0.0909, 0.0207,\n",
      "        0.0148, 0.0100, 0.0196, 0.0126, 0.0210], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 13: BSNE_Loss=0.1734,local_loss: 0.17335333912611717,global_loss: 0.5508843064308167\n",
      "Q: tensor([0.0033, 0.0160, 0.0266, 0.0238, 0.0130, 0.0412, 0.0044, 0.0225, 0.0139,\n",
      "        0.0275, 0.0503, 0.0235, 0.0092, 0.0198, 0.0237, 0.0122, 0.0038, 0.0153,\n",
      "        0.0146, 0.0404, 0.0103, 0.0115, 0.0310, 0.0347, 0.0143, 0.0174, 0.0296,\n",
      "        0.0149, 0.0163, 0.0222, 0.0212, 0.0181, 0.0184, 0.0191, 0.0198, 0.0124,\n",
      "        0.0207, 0.0321, 0.0064, 0.0070, 0.0166, 0.0075, 0.0349, 0.0266, 0.0399,\n",
      "        0.0404, 0.0109, 0.0131, 0.0225, 0.0051], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 14: BSNE_Loss=0.0482,local_loss: 0.04821205664892773,global_loss: 0.6824896931648254\n",
      "Center node 7572 finished at step 15 with loss 0.0482\n",
      "\n",
      "=== Pretraining Epoch 2 (Center Node: 7964) ===\n",
      "Q: tensor([0.0113, 0.0099, 0.0718, 0.1152, 0.0476, 0.0128, 0.0111, 0.0246, 0.0137,\n",
      "        0.0237, 0.0238, 0.0260, 0.0052, 0.0093, 0.0393, 0.0176, 0.0226, 0.0331,\n",
      "        0.0038, 0.0048, 0.0044, 0.0260, 0.0101, 0.0213, 0.0376, 0.0076, 0.0119,\n",
      "        0.0180, 0.0089, 0.0185, 0.0152, 0.0053, 0.0089, 0.0249, 0.0226, 0.0078,\n",
      "        0.0108, 0.0140, 0.0542, 0.0108, 0.0048, 0.0073, 0.0085, 0.0074, 0.0085,\n",
      "        0.0078, 0.0171, 0.0339, 0.0256, 0.0130], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 0: BSNE_Loss=0.1876,local_loss: 0.18762077813783384,global_loss: 0.2672172784805298\n",
      "Q: tensor([0.0114, 0.0133, 0.0071, 0.0055, 0.0085, 0.0374, 0.0065, 0.0103, 0.0090,\n",
      "        0.0035, 0.0142, 0.0368, 0.0119, 0.0703, 0.0063, 0.0163, 0.0217, 0.0170,\n",
      "        0.0098, 0.0049, 0.0084, 0.0153, 0.0141, 0.0355, 0.0214, 0.0111, 0.0128,\n",
      "        0.0535, 0.0118, 0.0221, 0.0111, 0.0090, 0.1281, 0.0360, 0.0090, 0.0204,\n",
      "        0.0079, 0.0106, 0.0160, 0.0080, 0.0104, 0.0116, 0.0191, 0.0143, 0.0099,\n",
      "        0.0303, 0.0490, 0.0230, 0.0356, 0.0132], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 1: BSNE_Loss=0.1806,local_loss: 0.18064182623003455,global_loss: 0.49197858572006226\n",
      "Q: tensor([0.0062, 0.0124, 0.0344, 0.0106, 0.0108, 0.0186, 0.0218, 0.0112, 0.0056,\n",
      "        0.0374, 0.0262, 0.0142, 0.0217, 0.0248, 0.0098, 0.0077, 0.0277, 0.0174,\n",
      "        0.0197, 0.0336, 0.0156, 0.0174, 0.0109, 0.0103, 0.0232, 0.0166, 0.0057,\n",
      "        0.0105, 0.0188, 0.0788, 0.0048, 0.0440, 0.0106, 0.0050, 0.0160, 0.0155,\n",
      "        0.0079, 0.0231, 0.0119, 0.0505, 0.0214, 0.0073, 0.0325, 0.0693, 0.0124,\n",
      "        0.0086, 0.0104, 0.0261, 0.0348, 0.0081], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 2: BSNE_Loss=0.1115,local_loss: 0.11147531485230053,global_loss: 0.4997243881225586\n",
      "Q: tensor([0.0217, 0.0284, 0.0248, 0.0224, 0.0147, 0.0333, 0.0121, 0.0049, 0.0177,\n",
      "        0.0166, 0.0111, 0.0165, 0.0330, 0.0078, 0.0218, 0.0316, 0.0066, 0.0241,\n",
      "        0.0116, 0.0223, 0.0221, 0.0528, 0.0127, 0.0107, 0.0084, 0.0088, 0.0270,\n",
      "        0.0136, 0.0193, 0.0097, 0.0207, 0.0249, 0.0163, 0.0060, 0.0169, 0.0335,\n",
      "        0.0220, 0.0326, 0.0200, 0.0087, 0.0071, 0.0257, 0.0233, 0.0088, 0.0206,\n",
      "        0.0406, 0.0469, 0.0251, 0.0081, 0.0243], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 3: BSNE_Loss=0.0289,local_loss: 0.028864508107801987,global_loss: 0.6685243844985962\n",
      "Center node 7964 finished at step 4 with loss 0.0289\n",
      "\n",
      "=== Pretraining Epoch 3 (Center Node: 7312) ===\n",
      "Q: tensor([0.0278, 0.0148, 0.0023, 0.0181, 0.0254, 0.0058, 0.0508, 0.0037, 0.0424,\n",
      "        0.0267, 0.0162, 0.0052, 0.0116, 0.0208, 0.0327, 0.0220, 0.0364, 0.0318,\n",
      "        0.0248, 0.0107, 0.0300, 0.0298, 0.0107, 0.0264, 0.0309, 0.0084, 0.0083,\n",
      "        0.0206, 0.0061, 0.0086, 0.0153, 0.0148, 0.0066, 0.0186, 0.0158, 0.0063,\n",
      "        0.0064, 0.0829, 0.0118, 0.0099, 0.0151, 0.0048, 0.0122, 0.0589, 0.0050,\n",
      "        0.0263, 0.0179, 0.0242, 0.0288, 0.0088], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 0: BSNE_Loss=0.1513,local_loss: 0.15133149140846724,global_loss: 0.25713488459587097\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Q: tensor([0.0153, 0.0119, 0.0387, 0.0705, 0.0121, 0.0131, 0.0211, 0.0329, 0.0187,\n",
      "        0.0071, 0.0086, 0.0120, 0.0271, 0.0131, 0.0042, 0.0080, 0.0281, 0.0241,\n",
      "        0.0127, 0.0286, 0.0196, 0.0165, 0.0241, 0.0132, 0.0078, 0.0155, 0.0288,\n",
      "        0.0079, 0.0084, 0.0127, 0.0258, 0.0141, 0.0114, 0.0380, 0.0107, 0.0302,\n",
      "        0.0220, 0.0669, 0.0172, 0.0243, 0.0162, 0.0087, 0.0074, 0.0413, 0.0243,\n",
      "        0.0162, 0.0212, 0.0096, 0.0050, 0.0273], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 1: BSNE_Loss=0.0715,local_loss: 0.07150864141907629,global_loss: 0.42118820548057556\n",
      "Q: tensor([0.0233, 0.0263, 0.0072, 0.0251, 0.0161, 0.0044, 0.0048, 0.0449, 0.0026,\n",
      "        0.0221, 0.0108, 0.0503, 0.0144, 0.0507, 0.0192, 0.0081, 0.0378, 0.0481,\n",
      "        0.0157, 0.0105, 0.0256, 0.0380, 0.0144, 0.0068, 0.0077, 0.0222, 0.0058,\n",
      "        0.0073, 0.0861, 0.0076, 0.0199, 0.0337, 0.0109, 0.0088, 0.0120, 0.0098,\n",
      "        0.0075, 0.0102, 0.0097, 0.0107, 0.0076, 0.0208, 0.0035, 0.0101, 0.0062,\n",
      "        0.0158, 0.0050, 0.0121, 0.1101, 0.0120], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 2: BSNE_Loss=0.2406,local_loss: 0.24062778472674623,global_loss: 0.19431152939796448\n",
      "Q: tensor([0.0483, 0.0277, 0.0182, 0.0264, 0.0103, 0.0145, 0.0020, 0.0223, 0.0048,\n",
      "        0.0469, 0.0109, 0.0385, 0.0069, 0.0058, 0.0090, 0.0142, 0.0089, 0.0374,\n",
      "        0.0173, 0.0169, 0.0459, 0.0051, 0.0096, 0.0252, 0.0200, 0.0092, 0.0849,\n",
      "        0.0133, 0.0157, 0.0177, 0.0324, 0.0220, 0.0411, 0.0137, 0.0068, 0.0302,\n",
      "        0.0048, 0.0419, 0.0027, 0.0138, 0.0155, 0.0107, 0.0168, 0.0258, 0.0126,\n",
      "        0.0170, 0.0064, 0.0109, 0.0284, 0.0132], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 3: BSNE_Loss=0.1543,local_loss: 0.15430513403193172,global_loss: 0.19472722709178925\n",
      "Q: tensor([0.0163, 0.0090, 0.0430, 0.0287, 0.0079, 0.0249, 0.0125, 0.0054, 0.0196,\n",
      "        0.0582, 0.0103, 0.0115, 0.0256, 0.0697, 0.0132, 0.0129, 0.0049, 0.0058,\n",
      "        0.0281, 0.0196, 0.0130, 0.0369, 0.0274, 0.0145, 0.0102, 0.0101, 0.0302,\n",
      "        0.0046, 0.0111, 0.0117, 0.0195, 0.0244, 0.0093, 0.0222, 0.0264, 0.0060,\n",
      "        0.0155, 0.0320, 0.0055, 0.0164, 0.0062, 0.0109, 0.0048, 0.0963, 0.0248,\n",
      "        0.0303, 0.0109, 0.0211, 0.0120, 0.0090], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 4: BSNE_Loss=0.1470,local_loss: 0.1470221952559189,global_loss: 0.741942286491394\n",
      "Q: tensor([0.0256, 0.1303, 0.0421, 0.0135, 0.0267, 0.0072, 0.0082, 0.0144, 0.0151,\n",
      "        0.0037, 0.0074, 0.0169, 0.0089, 0.0181, 0.0145, 0.0392, 0.0168, 0.0106,\n",
      "        0.0602, 0.0252, 0.0352, 0.0096, 0.0077, 0.0107, 0.0187, 0.0164, 0.0035,\n",
      "        0.0129, 0.0146, 0.0239, 0.0336, 0.0411, 0.0068, 0.0092, 0.0025, 0.0123,\n",
      "        0.0209, 0.0145, 0.0312, 0.0258, 0.0120, 0.0184, 0.0041, 0.0076, 0.0133,\n",
      "        0.0100, 0.0385, 0.0107, 0.0135, 0.0160], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 5: BSNE_Loss=0.1753,local_loss: 0.17534034681513044,global_loss: -0.0438464917242527\n",
      "Q: tensor([0.0034, 0.0162, 0.0112, 0.0113, 0.0195, 0.0118, 0.0206, 0.0347, 0.0164,\n",
      "        0.0216, 0.0167, 0.0281, 0.0088, 0.0185, 0.0397, 0.0055, 0.0817, 0.0187,\n",
      "        0.0107, 0.0393, 0.0090, 0.0087, 0.0515, 0.0153, 0.0044, 0.0200, 0.0243,\n",
      "        0.0207, 0.0142, 0.0173, 0.0067, 0.0316, 0.0059, 0.0095, 0.0060, 0.0099,\n",
      "        0.0110, 0.0323, 0.0052, 0.0092, 0.0053, 0.0084, 0.0324, 0.1139, 0.0105,\n",
      "        0.0219, 0.0141, 0.0058, 0.0303, 0.0104], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 6: BSNE_Loss=0.1844,local_loss: 0.18440469496373685,global_loss: 0.3871908485889435\n",
      "Q: tensor([0.0668, 0.0443, 0.0189, 0.0102, 0.0524, 0.0081, 0.0296, 0.0098, 0.0124,\n",
      "        0.0077, 0.0652, 0.0257, 0.0364, 0.0262, 0.0117, 0.0046, 0.0270, 0.0134,\n",
      "        0.0073, 0.0088, 0.0153, 0.0235, 0.0358, 0.0224, 0.0074, 0.0217, 0.0217,\n",
      "        0.0134, 0.0104, 0.0211, 0.0056, 0.0061, 0.0185, 0.0163, 0.0128, 0.0114,\n",
      "        0.0203, 0.0126, 0.0109, 0.0153, 0.0132, 0.0135, 0.0156, 0.0226, 0.0248,\n",
      "        0.0112, 0.0085, 0.0316, 0.0215, 0.0282], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 7: BSNE_Loss=0.0770,local_loss: 0.07697627603056346,global_loss: 0.5145770311355591\n",
      "Q: tensor([0.0053, 0.0229, 0.0334, 0.0216, 0.0400, 0.0337, 0.0114, 0.0096, 0.0263,\n",
      "        0.0191, 0.0084, 0.0066, 0.0036, 0.0149, 0.0033, 0.0035, 0.0069, 0.0105,\n",
      "        0.0196, 0.1019, 0.0092, 0.0080, 0.0190, 0.0284, 0.0551, 0.0072, 0.0306,\n",
      "        0.0119, 0.0176, 0.0469, 0.0247, 0.0271, 0.0079, 0.0136, 0.0019, 0.0116,\n",
      "        0.0086, 0.0226, 0.0092, 0.0431, 0.0056, 0.0051, 0.0110, 0.0076, 0.0131,\n",
      "        0.0064, 0.0180, 0.0057, 0.0261, 0.0945], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 8: BSNE_Loss=0.2557,local_loss: 0.2557224770992964,global_loss: -0.01684371381998062\n",
      "Q: tensor([0.0082, 0.0620, 0.0076, 0.0772, 0.0160, 0.0347, 0.0047, 0.0188, 0.0093,\n",
      "        0.0102, 0.0234, 0.0206, 0.0125, 0.0031, 0.0060, 0.0350, 0.0067, 0.0133,\n",
      "        0.0041, 0.0070, 0.0133, 0.1115, 0.0073, 0.0055, 0.0189, 0.0126, 0.0072,\n",
      "        0.0313, 0.0132, 0.0085, 0.0120, 0.0057, 0.0490, 0.0121, 0.0109, 0.0385,\n",
      "        0.0037, 0.0265, 0.0415, 0.0143, 0.0251, 0.0092, 0.0045, 0.0070, 0.0235,\n",
      "        0.0332, 0.0033, 0.0257, 0.0289, 0.0161], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 9: BSNE_Loss=0.2539,local_loss: 0.2538540529107432,global_loss: 0.3316289484500885\n",
      "Q: tensor([0.0188, 0.0207, 0.0030, 0.0141, 0.0242, 0.0336, 0.0128, 0.0155, 0.0172,\n",
      "        0.0117, 0.0359, 0.0175, 0.0119, 0.0627, 0.0024, 0.0068, 0.0067, 0.0055,\n",
      "        0.0197, 0.0305, 0.0304, 0.0135, 0.0217, 0.0352, 0.0334, 0.0079, 0.0033,\n",
      "        0.0045, 0.0235, 0.0032, 0.0452, 0.1115, 0.0403, 0.0091, 0.0295, 0.0150,\n",
      "        0.0319, 0.0169, 0.0092, 0.0173, 0.0141, 0.0116, 0.0094, 0.0060, 0.0223,\n",
      "        0.0067, 0.0117, 0.0218, 0.0162, 0.0066], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 10: BSNE_Loss=0.1931,local_loss: 0.19312713078387728,global_loss: 0.3551950454711914\n",
      "Q: tensor([0.0108, 0.0344, 0.0314, 0.0033, 0.0198, 0.0056, 0.0154, 0.0083, 0.0163,\n",
      "        0.0187, 0.0354, 0.0062, 0.0062, 0.0128, 0.0209, 0.0061, 0.0367, 0.0847,\n",
      "        0.0220, 0.0238, 0.0327, 0.0617, 0.0257, 0.0109, 0.0607, 0.0038, 0.0081,\n",
      "        0.0119, 0.0030, 0.0113, 0.0072, 0.0070, 0.0182, 0.0182, 0.0207, 0.0180,\n",
      "        0.0455, 0.0154, 0.0161, 0.0039, 0.0373, 0.0189, 0.0094, 0.0091, 0.0250,\n",
      "        0.0024, 0.0145, 0.0535, 0.0058, 0.0050], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 11: BSNE_Loss=0.2196,local_loss: 0.2196378207337447,global_loss: 0.29692542552948\n",
      "Q: tensor([0.0152, 0.0252, 0.0178, 0.0174, 0.0092, 0.0101, 0.0064, 0.0063, 0.0237,\n",
      "        0.0061, 0.0058, 0.0089, 0.0241, 0.0266, 0.0101, 0.0106, 0.0304, 0.0110,\n",
      "        0.0199, 0.0552, 0.0277, 0.0107, 0.0064, 0.0128, 0.0278, 0.0067, 0.0068,\n",
      "        0.0045, 0.0139, 0.0091, 0.0180, 0.0115, 0.0135, 0.0102, 0.0076, 0.1128,\n",
      "        0.0230, 0.0292, 0.0829, 0.0068, 0.0372, 0.0142, 0.0128, 0.0125, 0.0174,\n",
      "        0.0122, 0.0054, 0.0386, 0.0449, 0.0228], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 12: BSNE_Loss=0.1761,local_loss: 0.17609967183670322,global_loss: 0.41672131419181824\n",
      "Q: tensor([0.0216, 0.0033, 0.0076, 0.0112, 0.0035, 0.0169, 0.0051, 0.0047, 0.0058,\n",
      "        0.0255, 0.0248, 0.0054, 0.0085, 0.0222, 0.0221, 0.0040, 0.0338, 0.0051,\n",
      "        0.0387, 0.0219, 0.0050, 0.0135, 0.0161, 0.0747, 0.0155, 0.0112, 0.0394,\n",
      "        0.0139, 0.0274, 0.0321, 0.0386, 0.0211, 0.0377, 0.0092, 0.0162, 0.0058,\n",
      "        0.0137, 0.0547, 0.0098, 0.0069, 0.0113, 0.0055, 0.0130, 0.0520, 0.0061,\n",
      "        0.0249, 0.0464, 0.0361, 0.0206, 0.0297], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 13: BSNE_Loss=0.1904,local_loss: 0.19042074794133712,global_loss: 0.20979757606983185\n",
      "Q: tensor([0.0214, 0.0318, 0.0540, 0.0192, 0.0134, 0.0250, 0.0076, 0.0030, 0.0152,\n",
      "        0.0137, 0.0161, 0.0194, 0.0044, 0.0051, 0.0040, 0.0087, 0.0130, 0.0179,\n",
      "        0.0332, 0.0221, 0.0155, 0.0190, 0.0185, 0.0162, 0.0215, 0.0283, 0.0082,\n",
      "        0.0024, 0.0089, 0.0087, 0.0519, 0.0249, 0.0374, 0.0219, 0.0074, 0.0134,\n",
      "        0.0088, 0.0232, 0.0484, 0.0052, 0.0081, 0.0056, 0.0263, 0.0349, 0.0279,\n",
      "        0.0053, 0.0064, 0.0077, 0.1181, 0.0215], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 14: BSNE_Loss=0.1987,local_loss: 0.19871278626443245,global_loss: -0.1024719700217247\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Q: tensor([0.0106, 0.0321, 0.0115, 0.0162, 0.0100, 0.0043, 0.0123, 0.0378, 0.0045,\n",
      "        0.0047, 0.0503, 0.0074, 0.0374, 0.0024, 0.0353, 0.0115, 0.0294, 0.0114,\n",
      "        0.0088, 0.0095, 0.0759, 0.0147, 0.0183, 0.0467, 0.0104, 0.0155, 0.0307,\n",
      "        0.0148, 0.0079, 0.0112, 0.0225, 0.0249, 0.0341, 0.0092, 0.0256, 0.0256,\n",
      "        0.0136, 0.0189, 0.0275, 0.0332, 0.0304, 0.0033, 0.0065, 0.0108, 0.0289,\n",
      "        0.0226, 0.0067, 0.0396, 0.0058, 0.0167], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 15: BSNE_Loss=0.1489,local_loss: 0.1488673886399807,global_loss: 0.5670510530471802\n",
      "Q: tensor([0.0112, 0.0554, 0.0922, 0.0079, 0.0242, 0.0086, 0.0075, 0.0176, 0.0349,\n",
      "        0.0112, 0.0116, 0.0266, 0.0270, 0.0526, 0.0065, 0.0529, 0.0053, 0.0152,\n",
      "        0.0174, 0.0096, 0.0104, 0.0055, 0.0308, 0.0087, 0.0046, 0.0122, 0.0263,\n",
      "        0.0218, 0.0075, 0.0201, 0.0291, 0.0144, 0.0171, 0.0182, 0.0116, 0.0140,\n",
      "        0.0047, 0.0084, 0.0091, 0.0389, 0.0188, 0.0095, 0.0095, 0.0051, 0.0299,\n",
      "        0.0229, 0.0705, 0.0068, 0.0056, 0.0126], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 16: BSNE_Loss=0.1795,local_loss: 0.1795212780881006,global_loss: 0.45883709192276\n",
      "Q: tensor([0.0069, 0.0142, 0.0524, 0.0076, 0.0175, 0.0161, 0.0112, 0.0178, 0.0127,\n",
      "        0.0046, 0.0233, 0.0048, 0.0092, 0.0062, 0.0016, 0.0064, 0.0065, 0.0092,\n",
      "        0.0066, 0.0045, 0.2862, 0.0353, 0.0019, 0.0088, 0.0059, 0.0069, 0.0254,\n",
      "        0.0155, 0.0253, 0.0018, 0.0014, 0.0213, 0.0126, 0.0377, 0.0056, 0.0593,\n",
      "        0.0079, 0.0052, 0.0216, 0.0040, 0.0339, 0.0098, 0.0058, 0.0230, 0.0344,\n",
      "        0.0206, 0.0149, 0.0081, 0.0141, 0.0065], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 17: BSNE_Loss=0.4854,local_loss: 0.48541963062510707,global_loss: -0.014121713116765022\n",
      "Q: tensor([0.0053, 0.0168, 0.0070, 0.0224, 0.0496, 0.0514, 0.0069, 0.0275, 0.0078,\n",
      "        0.0430, 0.0223, 0.0242, 0.0128, 0.0120, 0.0181, 0.0101, 0.0184, 0.0353,\n",
      "        0.0134, 0.0058, 0.0059, 0.0035, 0.0072, 0.0269, 0.0087, 0.0121, 0.0061,\n",
      "        0.0906, 0.0244, 0.0216, 0.0073, 0.0125, 0.0696, 0.0396, 0.0062, 0.0255,\n",
      "        0.0566, 0.0103, 0.0117, 0.0102, 0.0199, 0.0226, 0.0108, 0.0175, 0.0078,\n",
      "        0.0283, 0.0066, 0.0066, 0.0107, 0.0025], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 18: BSNE_Loss=0.2038,local_loss: 0.2037677779052507,global_loss: 0.2670712471008301\n",
      "Q: tensor([0.0071, 0.0118, 0.0345, 0.0118, 0.0026, 0.0233, 0.0064, 0.0349, 0.0261,\n",
      "        0.0386, 0.0210, 0.0122, 0.0044, 0.0371, 0.0154, 0.0147, 0.0079, 0.0661,\n",
      "        0.0145, 0.0196, 0.0221, 0.0110, 0.0334, 0.0141, 0.0148, 0.0490, 0.0143,\n",
      "        0.0437, 0.0121, 0.0112, 0.0239, 0.0297, 0.0416, 0.0191, 0.0117, 0.0188,\n",
      "        0.0203, 0.0125, 0.0158, 0.0175, 0.0145, 0.0190, 0.0075, 0.0193, 0.0077,\n",
      "        0.0095, 0.0036, 0.0422, 0.0064, 0.0239], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 19: BSNE_Loss=0.0971,local_loss: 0.09708672359137271,global_loss: 0.35189467668533325\n",
      "Q: tensor([0.0296, 0.0237, 0.0069, 0.0052, 0.0057, 0.0108, 0.0172, 0.0091, 0.0401,\n",
      "        0.0295, 0.0280, 0.0057, 0.0097, 0.0944, 0.0078, 0.0057, 0.0250, 0.0066,\n",
      "        0.0119, 0.0443, 0.0334, 0.0394, 0.0063, 0.0223, 0.0054, 0.0268, 0.0032,\n",
      "        0.0053, 0.0145, 0.0267, 0.0151, 0.0414, 0.0186, 0.0059, 0.0135, 0.0411,\n",
      "        0.0327, 0.0083, 0.0143, 0.0174, 0.0083, 0.0288, 0.0316, 0.0161, 0.0131,\n",
      "        0.0163, 0.0190, 0.0209, 0.0180, 0.0194], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 20: BSNE_Loss=0.1439,local_loss: 0.14390663556588248,global_loss: 0.2761884033679962\n",
      "Q: tensor([0.0385, 0.0240, 0.0053, 0.0155, 0.0621, 0.0528, 0.0067, 0.0073, 0.0177,\n",
      "        0.0167, 0.0421, 0.0107, 0.0116, 0.0156, 0.0089, 0.0182, 0.0631, 0.0191,\n",
      "        0.0137, 0.0188, 0.0175, 0.0086, 0.0528, 0.0324, 0.0380, 0.0076, 0.0495,\n",
      "        0.0214, 0.0169, 0.0132, 0.0090, 0.0083, 0.0339, 0.0085, 0.0012, 0.0149,\n",
      "        0.0088, 0.0192, 0.0100, 0.0091, 0.0245, 0.0063, 0.0080, 0.0133, 0.0061,\n",
      "        0.0102, 0.0198, 0.0071, 0.0187, 0.0368], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 21: BSNE_Loss=0.1553,local_loss: 0.15529296805163142,global_loss: 0.223018616437912\n",
      "Q: tensor([0.0316, 0.0180, 0.0082, 0.0301, 0.0062, 0.0148, 0.0119, 0.0034, 0.0061,\n",
      "        0.0209, 0.0184, 0.0408, 0.0322, 0.0273, 0.0145, 0.0104, 0.0538, 0.0076,\n",
      "        0.0039, 0.0387, 0.0313, 0.0057, 0.0122, 0.0137, 0.0236, 0.0122, 0.0255,\n",
      "        0.0281, 0.0046, 0.0130, 0.0199, 0.0066, 0.0247, 0.0085, 0.0216, 0.0367,\n",
      "        0.0036, 0.0572, 0.0167, 0.0053, 0.0086, 0.0478, 0.0236, 0.0299, 0.0069,\n",
      "        0.0177, 0.0032, 0.0531, 0.0185, 0.0210], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 22: BSNE_Loss=0.1555,local_loss: 0.15550424824929007,global_loss: 0.30417609214782715\n",
      "Q: tensor([0.0311, 0.0240, 0.0233, 0.0245, 0.0075, 0.0066, 0.0205, 0.0074, 0.0174,\n",
      "        0.0146, 0.0083, 0.0299, 0.0357, 0.0150, 0.0028, 0.0119, 0.0143, 0.0245,\n",
      "        0.0216, 0.0170, 0.0165, 0.0307, 0.0107, 0.0106, 0.0188, 0.0127, 0.0316,\n",
      "        0.0132, 0.0113, 0.0241, 0.0317, 0.0072, 0.0328, 0.0995, 0.0057, 0.0152,\n",
      "        0.0212, 0.0455, 0.0354, 0.0105, 0.0096, 0.0085, 0.0144, 0.0453, 0.0046,\n",
      "        0.0069, 0.0209, 0.0087, 0.0315, 0.0070], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 23: BSNE_Loss=0.1144,local_loss: 0.11439661986140344,global_loss: 0.09600300341844559\n",
      "Q: tensor([0.0067, 0.0092, 0.0540, 0.0037, 0.0087, 0.0093, 0.0039, 0.0191, 0.0066,\n",
      "        0.0303, 0.0072, 0.0225, 0.0184, 0.1010, 0.0039, 0.0249, 0.0062, 0.0173,\n",
      "        0.0103, 0.0260, 0.0098, 0.0121, 0.0079, 0.0105, 0.0680, 0.0075, 0.0120,\n",
      "        0.0336, 0.0114, 0.0882, 0.0074, 0.0159, 0.0154, 0.0186, 0.0159, 0.0194,\n",
      "        0.0359, 0.0366, 0.0067, 0.0215, 0.0361, 0.0247, 0.0161, 0.0055, 0.0086,\n",
      "        0.0115, 0.0227, 0.0164, 0.0087, 0.0062], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 24: BSNE_Loss=0.2141,local_loss: 0.2140895177528216,global_loss: 0.24484972655773163\n",
      "Q: tensor([0.0515, 0.0024, 0.0274, 0.0111, 0.0180, 0.0217, 0.0445, 0.0089, 0.0715,\n",
      "        0.0168, 0.0125, 0.0272, 0.0109, 0.0174, 0.0122, 0.0047, 0.0125, 0.0061,\n",
      "        0.0155, 0.0174, 0.0248, 0.0086, 0.0366, 0.0049, 0.0099, 0.0493, 0.0120,\n",
      "        0.0375, 0.0715, 0.0124, 0.0066, 0.0244, 0.0324, 0.0198, 0.0243, 0.0132,\n",
      "        0.0145, 0.0229, 0.0041, 0.0204, 0.0057, 0.0282, 0.0220, 0.0071, 0.0206,\n",
      "        0.0148, 0.0052, 0.0241, 0.0092, 0.0030], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 25: BSNE_Loss=0.1640,local_loss: 0.16403702764124276,global_loss: 0.22236095368862152\n",
      "Q: tensor([0.0695, 0.0042, 0.0229, 0.0152, 0.0158, 0.0186, 0.0066, 0.0071, 0.0181,\n",
      "        0.0309, 0.0077, 0.0163, 0.0100, 0.0067, 0.0088, 0.0300, 0.0070, 0.0104,\n",
      "        0.0078, 0.0133, 0.0357, 0.0326, 0.0118, 0.0309, 0.0066, 0.0298, 0.0083,\n",
      "        0.0136, 0.0335, 0.0106, 0.0084, 0.0431, 0.0132, 0.0034, 0.0051, 0.0478,\n",
      "        0.0143, 0.0985, 0.0087, 0.0418, 0.0280, 0.0238, 0.0102, 0.0065, 0.0077,\n",
      "        0.0149, 0.0415, 0.0067, 0.0105, 0.0258], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 26: BSNE_Loss=0.1851,local_loss: 0.18513238378004798,global_loss: 0.2838283181190491\n",
      "Q: tensor([0.0184, 0.0280, 0.0369, 0.0213, 0.0115, 0.0093, 0.0068, 0.0100, 0.0333,\n",
      "        0.0075, 0.0118, 0.0120, 0.0078, 0.0871, 0.0066, 0.0120, 0.0328, 0.0055,\n",
      "        0.0169, 0.0312, 0.0079, 0.0180, 0.0254, 0.0246, 0.0112, 0.0022, 0.0155,\n",
      "        0.0510, 0.0200, 0.0222, 0.0243, 0.1121, 0.0065, 0.0059, 0.0116, 0.0068,\n",
      "        0.0030, 0.0209, 0.0415, 0.0110, 0.0136, 0.0153, 0.0092, 0.0113, 0.0115,\n",
      "        0.0036, 0.0167, 0.0129, 0.0509, 0.0070], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 27: BSNE_Loss=0.2176,local_loss: 0.2176483799494403,global_loss: 0.24488352239131927\n",
      "Q: tensor([0.0044, 0.0612, 0.0270, 0.0401, 0.0063, 0.0254, 0.0054, 0.0049, 0.0146,\n",
      "        0.0171, 0.0237, 0.0057, 0.0091, 0.0188, 0.0078, 0.0125, 0.0075, 0.0281,\n",
      "        0.0071, 0.0035, 0.1244, 0.0150, 0.0260, 0.0307, 0.0173, 0.0061, 0.0040,\n",
      "        0.0230, 0.0086, 0.0114, 0.0137, 0.0233, 0.0257, 0.0098, 0.0117, 0.0553,\n",
      "        0.0135, 0.0148, 0.0140, 0.0041, 0.0121, 0.0576, 0.0061, 0.0366, 0.0334,\n",
      "        0.0108, 0.0221, 0.0051, 0.0172, 0.0168], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 28: BSNE_Loss=0.2216,local_loss: 0.22164407009852724,global_loss: 0.23826032876968384\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Q: tensor([0.0087, 0.0106, 0.0072, 0.0132, 0.0250, 0.0109, 0.0229, 0.0325, 0.0471,\n",
      "        0.0072, 0.0305, 0.0250, 0.0062, 0.0071, 0.0328, 0.0118, 0.0096, 0.0133,\n",
      "        0.0330, 0.0373, 0.0292, 0.0084, 0.0192, 0.0345, 0.0161, 0.0418, 0.0309,\n",
      "        0.0171, 0.0048, 0.0076, 0.0103, 0.0078, 0.0029, 0.0165, 0.0192, 0.0094,\n",
      "        0.0183, 0.0114, 0.0417, 0.0473, 0.0056, 0.0528, 0.0102, 0.0147, 0.0098,\n",
      "        0.0159, 0.0179, 0.0081, 0.0353, 0.0432], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 29: BSNE_Loss=0.1138,local_loss: 0.1138452732974233,global_loss: 0.10386061668395996\n",
      "Q: tensor([0.0186, 0.0214, 0.0111, 0.0184, 0.0068, 0.0072, 0.0395, 0.0115, 0.0052,\n",
      "        0.0150, 0.0334, 0.0747, 0.0162, 0.0150, 0.0035, 0.0243, 0.0263, 0.0115,\n",
      "        0.0222, 0.0210, 0.0200, 0.0235, 0.0088, 0.0289, 0.0161, 0.0196, 0.0064,\n",
      "        0.0038, 0.0218, 0.0240, 0.0197, 0.0526, 0.0192, 0.0287, 0.0108, 0.0227,\n",
      "        0.0125, 0.0253, 0.0305, 0.0154, 0.0084, 0.0033, 0.0218, 0.0728, 0.0370,\n",
      "        0.0122, 0.0035, 0.0072, 0.0048, 0.0157], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 30: BSNE_Loss=0.1360,local_loss: 0.1360470229866794,global_loss: 0.1694018393754959\n",
      "Q: tensor([0.0388, 0.0078, 0.0224, 0.0132, 0.0155, 0.0082, 0.0203, 0.0152, 0.0063,\n",
      "        0.0166, 0.0376, 0.0182, 0.0197, 0.0065, 0.0285, 0.0101, 0.0062, 0.0167,\n",
      "        0.0210, 0.0100, 0.0285, 0.0052, 0.0071, 0.0696, 0.0135, 0.0143, 0.0112,\n",
      "        0.0115, 0.0024, 0.0208, 0.0084, 0.0309, 0.0335, 0.0165, 0.0226, 0.0186,\n",
      "        0.0429, 0.0068, 0.0106, 0.0106, 0.0166, 0.0210, 0.0027, 0.0570, 0.0082,\n",
      "        0.0187, 0.0321, 0.0263, 0.0222, 0.0710], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 31: BSNE_Loss=0.1356,local_loss: 0.13562681096407608,global_loss: 0.15286891162395477\n",
      "Q: tensor([0.0220, 0.0196, 0.0280, 0.0092, 0.0144, 0.0110, 0.0176, 0.0062, 0.0126,\n",
      "        0.0457, 0.0289, 0.0226, 0.0046, 0.0072, 0.0121, 0.0708, 0.0139, 0.0061,\n",
      "        0.0148, 0.0108, 0.0455, 0.0088, 0.0121, 0.0245, 0.0276, 0.0121, 0.0355,\n",
      "        0.0407, 0.0231, 0.0691, 0.0123, 0.0116, 0.0237, 0.0088, 0.0307, 0.0374,\n",
      "        0.0302, 0.0103, 0.0081, 0.0097, 0.0368, 0.0066, 0.0044, 0.0208, 0.0127,\n",
      "        0.0054, 0.0135, 0.0090, 0.0244, 0.0070], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 32: BSNE_Loss=0.1257,local_loss: 0.12574644242783556,global_loss: 0.5956912040710449\n",
      "Q: tensor([0.0285, 0.0094, 0.0236, 0.0202, 0.0491, 0.0101, 0.0243, 0.0274, 0.0180,\n",
      "        0.0246, 0.0216, 0.0152, 0.0531, 0.0101, 0.0037, 0.0116, 0.0139, 0.0158,\n",
      "        0.0139, 0.0113, 0.0029, 0.0107, 0.0145, 0.0280, 0.0153, 0.0464, 0.0151,\n",
      "        0.0108, 0.0058, 0.0793, 0.0065, 0.0214, 0.0202, 0.0488, 0.0147, 0.0195,\n",
      "        0.0129, 0.0156, 0.0144, 0.0052, 0.0126, 0.0154, 0.0100, 0.0412, 0.0160,\n",
      "        0.0178, 0.0133, 0.0118, 0.0067, 0.0417], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 33: BSNE_Loss=0.1039,local_loss: 0.10387431145363239,global_loss: 0.07699750363826752\n",
      "Q: tensor([0.0344, 0.0424, 0.0097, 0.0125, 0.0128, 0.0234, 0.0260, 0.0264, 0.0363,\n",
      "        0.0113, 0.0119, 0.0107, 0.0453, 0.0237, 0.0110, 0.0061, 0.0205, 0.0109,\n",
      "        0.0411, 0.0324, 0.0090, 0.0207, 0.0665, 0.0221, 0.0436, 0.0231, 0.0115,\n",
      "        0.0174, 0.0065, 0.0117, 0.0349, 0.0165, 0.0053, 0.0166, 0.0060, 0.0184,\n",
      "        0.0079, 0.0046, 0.0106, 0.0205, 0.0123, 0.0099, 0.0176, 0.0270, 0.0232,\n",
      "        0.0172, 0.0109, 0.0190, 0.0247, 0.0160], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 34: BSNE_Loss=0.0637,local_loss: 0.06372568058298367,global_loss: 0.4712964594364166\n",
      "Q: tensor([0.0063, 0.0636, 0.0048, 0.0275, 0.0222, 0.0063, 0.0212, 0.0129, 0.0470,\n",
      "        0.0104, 0.0114, 0.0147, 0.0192, 0.0189, 0.0143, 0.0515, 0.0373, 0.0035,\n",
      "        0.0168, 0.0033, 0.0253, 0.0105, 0.0149, 0.0079, 0.0195, 0.0085, 0.0064,\n",
      "        0.0203, 0.0101, 0.0044, 0.0096, 0.0169, 0.0265, 0.0134, 0.0043, 0.0061,\n",
      "        0.0216, 0.0274, 0.0135, 0.0212, 0.0424, 0.0173, 0.0605, 0.0121, 0.0078,\n",
      "        0.0276, 0.0131, 0.0707, 0.0083, 0.0387], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 35: BSNE_Loss=0.1660,local_loss: 0.16596468111471777,global_loss: 0.16852706670761108\n",
      "Q: tensor([0.0272, 0.0133, 0.0172, 0.0112, 0.0328, 0.0081, 0.0063, 0.0173, 0.0405,\n",
      "        0.0093, 0.0092, 0.0118, 0.0174, 0.0059, 0.0035, 0.0286, 0.0124, 0.0562,\n",
      "        0.0488, 0.0126, 0.0238, 0.0528, 0.0221, 0.0068, 0.0071, 0.0532, 0.0115,\n",
      "        0.0091, 0.0136, 0.0082, 0.0096, 0.0244, 0.0051, 0.0372, 0.0180, 0.0969,\n",
      "        0.0158, 0.0075, 0.0046, 0.0348, 0.0203, 0.0199, 0.0042, 0.0079, 0.0170,\n",
      "        0.0335, 0.0075, 0.0265, 0.0040, 0.0077], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 36: BSNE_Loss=0.1949,local_loss: 0.19490239659787434,global_loss: 0.1387856900691986\n",
      "Q: tensor([0.0200, 0.0218, 0.0125, 0.0094, 0.0228, 0.0460, 0.0092, 0.0210, 0.0118,\n",
      "        0.0079, 0.0132, 0.0120, 0.0325, 0.0351, 0.0396, 0.0080, 0.0257, 0.0106,\n",
      "        0.0127, 0.0194, 0.0437, 0.0118, 0.0104, 0.0065, 0.0577, 0.0124, 0.0150,\n",
      "        0.0312, 0.0626, 0.0143, 0.0140, 0.0056, 0.0109, 0.0729, 0.0082, 0.0127,\n",
      "        0.0087, 0.0218, 0.0074, 0.0174, 0.0440, 0.0100, 0.0107, 0.0075, 0.0073,\n",
      "        0.0117, 0.0289, 0.0038, 0.0228, 0.0167], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 37: BSNE_Loss=0.1214,local_loss: 0.12142443332845605,global_loss: 0.44805365800857544\n",
      "Q: tensor([0.0905, 0.0135, 0.0109, 0.0446, 0.0205, 0.0485, 0.0050, 0.0105, 0.0389,\n",
      "        0.0133, 0.0178, 0.0114, 0.0150, 0.0080, 0.0292, 0.0202, 0.0200, 0.0359,\n",
      "        0.0207, 0.0110, 0.0204, 0.0096, 0.0105, 0.0170, 0.0051, 0.0018, 0.0107,\n",
      "        0.0460, 0.0165, 0.0211, 0.0065, 0.0347, 0.0162, 0.0187, 0.0064, 0.0205,\n",
      "        0.0083, 0.0387, 0.0071, 0.0254, 0.0069, 0.0083, 0.0343, 0.0163, 0.0284,\n",
      "        0.0225, 0.0103, 0.0144, 0.0216, 0.0102], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 38: BSNE_Loss=0.1230,local_loss: 0.12304426883643058,global_loss: -0.039037395268678665\n",
      "Q: tensor([0.0158, 0.0205, 0.0082, 0.0083, 0.0110, 0.0048, 0.0158, 0.0048, 0.0284,\n",
      "        0.0271, 0.0298, 0.0100, 0.0134, 0.0108, 0.0225, 0.0103, 0.0301, 0.0188,\n",
      "        0.0129, 0.0098, 0.0127, 0.0502, 0.0153, 0.0356, 0.0755, 0.0055, 0.0068,\n",
      "        0.0079, 0.0223, 0.0196, 0.0177, 0.0174, 0.0158, 0.0134, 0.0200, 0.0103,\n",
      "        0.0179, 0.0153, 0.0970, 0.0228, 0.0120, 0.0024, 0.0094, 0.0315, 0.0158,\n",
      "        0.0232, 0.0259, 0.0216, 0.0359, 0.0103], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 39: BSNE_Loss=0.1203,local_loss: 0.12028914442877117,global_loss: 0.4095773994922638\n",
      "Q: tensor([0.0123, 0.0474, 0.0214, 0.0429, 0.0224, 0.0098, 0.0117, 0.0136, 0.0094,\n",
      "        0.0307, 0.0263, 0.0176, 0.0599, 0.0180, 0.0113, 0.0110, 0.0107, 0.0183,\n",
      "        0.0082, 0.0211, 0.0167, 0.0174, 0.0149, 0.0223, 0.0062, 0.0158, 0.0249,\n",
      "        0.0434, 0.0055, 0.0092, 0.0211, 0.0067, 0.0032, 0.0072, 0.0403, 0.0314,\n",
      "        0.0284, 0.0140, 0.0243, 0.0060, 0.0184, 0.0120, 0.0167, 0.0145, 0.0152,\n",
      "        0.0158, 0.0956, 0.0056, 0.0141, 0.0093], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 40: BSNE_Loss=0.1125,local_loss: 0.1124989027179334,global_loss: 0.1150633841753006\n",
      "Q: tensor([0.0074, 0.0074, 0.0153, 0.0168, 0.0047, 0.0210, 0.0102, 0.0219, 0.0052,\n",
      "        0.0059, 0.0251, 0.0105, 0.0132, 0.0156, 0.0053, 0.0137, 0.0095, 0.0470,\n",
      "        0.0156, 0.0088, 0.0048, 0.0228, 0.0477, 0.0081, 0.0354, 0.0283, 0.0259,\n",
      "        0.0257, 0.0207, 0.0064, 0.0922, 0.0112, 0.0401, 0.0435, 0.0179, 0.0186,\n",
      "        0.0113, 0.0071, 0.0241, 0.0061, 0.0081, 0.0028, 0.0065, 0.0120, 0.0021,\n",
      "        0.0109, 0.0248, 0.0073, 0.0062, 0.1410], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 41: BSNE_Loss=0.2733,local_loss: 0.2733312828088311,global_loss: 0.10992693901062012\n",
      "Q: tensor([0.0317, 0.0320, 0.0118, 0.0514, 0.0355, 0.0279, 0.0145, 0.0073, 0.0273,\n",
      "        0.0172, 0.0097, 0.0323, 0.0150, 0.0335, 0.0055, 0.0162, 0.0216, 0.0263,\n",
      "        0.0095, 0.0328, 0.0337, 0.0231, 0.0304, 0.0240, 0.0159, 0.0066, 0.0154,\n",
      "        0.0136, 0.0269, 0.0072, 0.0170, 0.0144, 0.0150, 0.0150, 0.0101, 0.0453,\n",
      "        0.0654, 0.0090, 0.0037, 0.0054, 0.0109, 0.0064, 0.0050, 0.0246, 0.0219,\n",
      "        0.0094, 0.0267, 0.0077, 0.0184, 0.0132], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 42: BSNE_Loss=0.0860,local_loss: 0.08595281324227066,global_loss: 0.4088834226131439\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Q: tensor([0.0210, 0.0066, 0.0388, 0.0193, 0.0104, 0.0101, 0.0079, 0.0144, 0.0123,\n",
      "        0.0146, 0.0581, 0.0461, 0.0118, 0.0199, 0.0042, 0.0089, 0.0131, 0.0209,\n",
      "        0.0068, 0.0110, 0.0687, 0.0324, 0.0068, 0.0269, 0.0264, 0.0409, 0.0107,\n",
      "        0.0701, 0.0039, 0.0188, 0.0130, 0.0300, 0.0207, 0.0058, 0.0158, 0.0196,\n",
      "        0.0099, 0.0039, 0.0276, 0.0118, 0.0405, 0.0068, 0.0042, 0.0214, 0.0071,\n",
      "        0.0078, 0.0305, 0.0200, 0.0207, 0.0213], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 43: BSNE_Loss=0.1521,local_loss: 0.15213559949856076,global_loss: 0.229954332113266\n",
      "Q: tensor([0.0098, 0.0046, 0.0585, 0.0104, 0.0055, 0.0094, 0.0179, 0.0254, 0.0098,\n",
      "        0.0114, 0.0078, 0.0153, 0.0237, 0.0121, 0.0198, 0.0106, 0.0215, 0.0027,\n",
      "        0.0295, 0.0035, 0.0117, 0.0575, 0.0041, 0.0236, 0.0112, 0.0093, 0.0132,\n",
      "        0.0096, 0.0076, 0.1024, 0.0448, 0.0247, 0.0136, 0.0016, 0.0290, 0.0440,\n",
      "        0.0068, 0.0277, 0.0082, 0.0476, 0.0182, 0.0054, 0.0052, 0.0370, 0.0096,\n",
      "        0.0169, 0.0070, 0.0146, 0.0365, 0.0423], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 44: BSNE_Loss=0.2384,local_loss: 0.2383759884955878,global_loss: 0.35399508476257324\n",
      "Q: tensor([0.0452, 0.0235, 0.0058, 0.0190, 0.0432, 0.0149, 0.0181, 0.0041, 0.0130,\n",
      "        0.0231, 0.0315, 0.0149, 0.0073, 0.0726, 0.0054, 0.0056, 0.0334, 0.0086,\n",
      "        0.0217, 0.0058, 0.0165, 0.0331, 0.0143, 0.0190, 0.0226, 0.0084, 0.0033,\n",
      "        0.0039, 0.0018, 0.0249, 0.0514, 0.0212, 0.0415, 0.0055, 0.0115, 0.0146,\n",
      "        0.0109, 0.0217, 0.0112, 0.0122, 0.0093, 0.0231, 0.0219, 0.0386, 0.0148,\n",
      "        0.0165, 0.0179, 0.0289, 0.0242, 0.0386], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 45: BSNE_Loss=0.1486,local_loss: 0.14860719933003674,global_loss: 0.3837563395500183\n",
      "Q: tensor([0.0142, 0.0135, 0.0151, 0.0143, 0.0357, 0.0467, 0.0146, 0.0639, 0.0125,\n",
      "        0.0141, 0.0067, 0.0247, 0.0162, 0.0374, 0.0065, 0.0098, 0.0434, 0.0102,\n",
      "        0.0467, 0.0449, 0.0144, 0.0124, 0.0228, 0.0137, 0.0115, 0.0246, 0.0121,\n",
      "        0.0054, 0.0084, 0.0180, 0.0124, 0.0220, 0.0141, 0.0245, 0.0227, 0.0083,\n",
      "        0.0037, 0.0496, 0.0246, 0.0118, 0.0284, 0.0081, 0.0135, 0.0147, 0.0069,\n",
      "        0.0311, 0.0121, 0.0079, 0.0206, 0.0285], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 46: BSNE_Loss=0.0833,local_loss: 0.08328654872846133,global_loss: 0.1665998250246048\n",
      "Q: tensor([0.0468, 0.0301, 0.0041, 0.0257, 0.0180, 0.0348, 0.0103, 0.0210, 0.0108,\n",
      "        0.0413, 0.0604, 0.0103, 0.0062, 0.0178, 0.0055, 0.0154, 0.0222, 0.0213,\n",
      "        0.0128, 0.0091, 0.0273, 0.0408, 0.0084, 0.0032, 0.0777, 0.0058, 0.0069,\n",
      "        0.0091, 0.0246, 0.0312, 0.0211, 0.0131, 0.0395, 0.0128, 0.0327, 0.0102,\n",
      "        0.0165, 0.0631, 0.0083, 0.0106, 0.0241, 0.0077, 0.0078, 0.0297, 0.0112,\n",
      "        0.0077, 0.0075, 0.0106, 0.0020, 0.0051], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 47: BSNE_Loss=0.1916,local_loss: 0.19164817217779542,global_loss: 0.24496488273143768\n",
      "Q: tensor([0.0123, 0.0110, 0.0183, 0.0198, 0.0246, 0.0272, 0.0107, 0.0202, 0.0230,\n",
      "        0.0117, 0.0224, 0.0106, 0.0101, 0.0338, 0.0063, 0.0143, 0.0045, 0.0098,\n",
      "        0.0122, 0.0136, 0.0664, 0.0135, 0.0368, 0.0174, 0.0129, 0.0491, 0.0217,\n",
      "        0.0094, 0.0078, 0.0155, 0.0100, 0.0082, 0.0223, 0.0154, 0.0316, 0.0234,\n",
      "        0.0593, 0.0260, 0.0243, 0.0106, 0.0170, 0.0104, 0.0139, 0.0215, 0.0049,\n",
      "        0.0384, 0.0491, 0.0064, 0.0205, 0.0201], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 48: BSNE_Loss=0.0719,local_loss: 0.0719018591828869,global_loss: 0.6038929224014282\n",
      "Q: tensor([0.0596, 0.0095, 0.0259, 0.0057, 0.0073, 0.0137, 0.0068, 0.0416, 0.0159,\n",
      "        0.0227, 0.0072, 0.0404, 0.0070, 0.0342, 0.0110, 0.0082, 0.0277, 0.0072,\n",
      "        0.0167, 0.0069, 0.0179, 0.0146, 0.0156, 0.0313, 0.0098, 0.0284, 0.0088,\n",
      "        0.0068, 0.0268, 0.0098, 0.0259, 0.0111, 0.0131, 0.0048, 0.0376, 0.0319,\n",
      "        0.0138, 0.0234, 0.0207, 0.0232, 0.0663, 0.0120, 0.0025, 0.0180, 0.0336,\n",
      "        0.0039, 0.0226, 0.0606, 0.0092, 0.0207], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 49: BSNE_Loss=0.1417,local_loss: 0.14165518546938843,global_loss: 0.2024807631969452\n",
      "Q: tensor([0.0124, 0.0128, 0.0454, 0.0087, 0.0105, 0.0084, 0.0087, 0.0253, 0.0259,\n",
      "        0.0113, 0.0082, 0.0099, 0.0105, 0.0092, 0.0043, 0.0317, 0.0096, 0.0135,\n",
      "        0.0154, 0.0058, 0.0189, 0.0187, 0.0088, 0.0138, 0.0089, 0.0076, 0.0067,\n",
      "        0.0083, 0.0055, 0.0129, 0.0424, 0.0220, 0.0285, 0.0038, 0.0102, 0.0222,\n",
      "        0.0043, 0.0114, 0.0078, 0.0085, 0.0073, 0.0064, 0.0302, 0.0412, 0.0234,\n",
      "        0.0053, 0.0107, 0.2306, 0.0420, 0.0443], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 50: BSNE_Loss=0.2971,local_loss: 0.2970500192934554,global_loss: 0.3682089149951935\n",
      "Q: tensor([0.0215, 0.0162, 0.0267, 0.0220, 0.0055, 0.0249, 0.0265, 0.0046, 0.0219,\n",
      "        0.0140, 0.0125, 0.0176, 0.0109, 0.0148, 0.0498, 0.0109, 0.0101, 0.0236,\n",
      "        0.0027, 0.0256, 0.0095, 0.0165, 0.0287, 0.0075, 0.0109, 0.0261, 0.0138,\n",
      "        0.0513, 0.0385, 0.0080, 0.0199, 0.0248, 0.0379, 0.0074, 0.0039, 0.0079,\n",
      "        0.0486, 0.0099, 0.0362, 0.0179, 0.0146, 0.0372, 0.0322, 0.0158, 0.0376,\n",
      "        0.0218, 0.0107, 0.0074, 0.0284, 0.0068], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 51: BSNE_Loss=0.0938,local_loss: 0.09376443077930173,global_loss: 0.25342413783073425\n",
      "Q: tensor([0.0124, 0.0119, 0.0222, 0.0189, 0.0112, 0.0074, 0.0094, 0.0324, 0.0188,\n",
      "        0.0122, 0.0136, 0.0266, 0.0114, 0.0074, 0.0225, 0.0087, 0.0279, 0.0186,\n",
      "        0.0084, 0.0349, 0.0140, 0.0419, 0.0297, 0.0277, 0.0281, 0.0091, 0.0163,\n",
      "        0.0098, 0.0089, 0.0144, 0.0628, 0.0476, 0.0962, 0.0039, 0.0341, 0.0551,\n",
      "        0.0092, 0.0156, 0.0063, 0.0074, 0.0212, 0.0030, 0.0120, 0.0130, 0.0124,\n",
      "        0.0102, 0.0184, 0.0041, 0.0061, 0.0249], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 52: BSNE_Loss=0.1531,local_loss: 0.15311275520148407,global_loss: 0.45293062925338745\n",
      "Q: tensor([0.0344, 0.0220, 0.0225, 0.0137, 0.0093, 0.0105, 0.0099, 0.0485, 0.0573,\n",
      "        0.0502, 0.0133, 0.0067, 0.0078, 0.0146, 0.0035, 0.0857, 0.0205, 0.0117,\n",
      "        0.0072, 0.0098, 0.0152, 0.0090, 0.0068, 0.0257, 0.0235, 0.0269, 0.0263,\n",
      "        0.0312, 0.0134, 0.0139, 0.0078, 0.0127, 0.0292, 0.0366, 0.0095, 0.0085,\n",
      "        0.0226, 0.0308, 0.0160, 0.0142, 0.0209, 0.0223, 0.0165, 0.0121, 0.0056,\n",
      "        0.0282, 0.0112, 0.0071, 0.0257, 0.0116], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 53: BSNE_Loss=0.1060,local_loss: 0.10597134122548245,global_loss: 0.13742299377918243\n",
      "Q: tensor([0.0114, 0.0164, 0.0105, 0.0202, 0.0165, 0.0480, 0.0118, 0.0283, 0.0154,\n",
      "        0.0186, 0.0287, 0.0114, 0.0075, 0.0130, 0.0422, 0.0110, 0.0210, 0.0196,\n",
      "        0.0185, 0.0149, 0.0065, 0.0082, 0.0228, 0.0201, 0.0300, 0.0122, 0.0341,\n",
      "        0.0075, 0.0152, 0.0284, 0.0399, 0.0160, 0.0723, 0.0075, 0.0096, 0.0070,\n",
      "        0.0164, 0.0129, 0.0093, 0.0177, 0.0115, 0.0187, 0.0105, 0.0279, 0.0287,\n",
      "        0.0262, 0.0143, 0.0553, 0.0219, 0.0067], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 54: BSNE_Loss=0.0550,local_loss: 0.054975943317291893,global_loss: 0.5888009071350098\n",
      "Q: tensor([0.0122, 0.0120, 0.0088, 0.0515, 0.0043, 0.0107, 0.0140, 0.0024, 0.0057,\n",
      "        0.0109, 0.0306, 0.0158, 0.0075, 0.0184, 0.0061, 0.0202, 0.0196, 0.0141,\n",
      "        0.0272, 0.0212, 0.0118, 0.0296, 0.0122, 0.0235, 0.0044, 0.0135, 0.0343,\n",
      "        0.0075, 0.0079, 0.0133, 0.0540, 0.0121, 0.0104, 0.0050, 0.0227, 0.0313,\n",
      "        0.0088, 0.0233, 0.0103, 0.0807, 0.0371, 0.0223, 0.0253, 0.0284, 0.0093,\n",
      "        0.0392, 0.0328, 0.0048, 0.0379, 0.0330], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 55: BSNE_Loss=0.1457,local_loss: 0.1457331626635393,global_loss: 0.25527361035346985\n",
      "Q: tensor([0.0488, 0.0808, 0.0038, 0.0325, 0.0072, 0.0058, 0.0211, 0.0136, 0.0166,\n",
      "        0.0076, 0.0080, 0.0112, 0.0094, 0.0409, 0.0037, 0.0143, 0.0143, 0.0146,\n",
      "        0.0309, 0.0115, 0.0089, 0.0140, 0.0173, 0.0244, 0.0351, 0.0418, 0.0044,\n",
      "        0.0072, 0.0205, 0.0409, 0.0141, 0.0259, 0.0209, 0.0120, 0.0072, 0.0065,\n",
      "        0.0309, 0.0265, 0.0098, 0.0293, 0.0276, 0.0185, 0.0229, 0.0127, 0.0134,\n",
      "        0.0109, 0.0142, 0.0387, 0.0072, 0.0397], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 56: BSNE_Loss=0.1242,local_loss: 0.12416466730409666,global_loss: 0.21215380728244781\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Q: tensor([0.0163, 0.0084, 0.0157, 0.0116, 0.0073, 0.0187, 0.0162, 0.0179, 0.0596,\n",
      "        0.0280, 0.0163, 0.0059, 0.0064, 0.0207, 0.0057, 0.0211, 0.0165, 0.0042,\n",
      "        0.0501, 0.0144, 0.0098, 0.0061, 0.0691, 0.0366, 0.0142, 0.0096, 0.0177,\n",
      "        0.0238, 0.0144, 0.0106, 0.0120, 0.0551, 0.0145, 0.0043, 0.0057, 0.0208,\n",
      "        0.0121, 0.0345, 0.0144, 0.0313, 0.0155, 0.0063, 0.0186, 0.0167, 0.0240,\n",
      "        0.0095, 0.0802, 0.0257, 0.0081, 0.0178], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 57: BSNE_Loss=0.1395,local_loss: 0.1395350136225706,global_loss: 0.31491315364837646\n",
      "Q: tensor([0.0158, 0.0119, 0.0119, 0.0301, 0.0308, 0.0082, 0.0164, 0.0265, 0.0061,\n",
      "        0.0254, 0.0202, 0.0105, 0.0102, 0.0117, 0.0095, 0.0219, 0.0199, 0.0044,\n",
      "        0.0107, 0.0243, 0.0298, 0.0472, 0.0331, 0.0307, 0.0147, 0.0123, 0.0120,\n",
      "        0.0131, 0.0268, 0.0277, 0.0245, 0.0156, 0.0344, 0.0070, 0.0112, 0.1210,\n",
      "        0.0304, 0.0239, 0.0158, 0.0069, 0.0145, 0.0090, 0.0109, 0.0098, 0.0105,\n",
      "        0.0288, 0.0091, 0.0118, 0.0081, 0.0233], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 58: BSNE_Loss=0.0923,local_loss: 0.0922900485497458,global_loss: 0.403179407119751\n",
      "Q: tensor([0.0248, 0.0090, 0.0693, 0.0146, 0.0278, 0.0149, 0.0111, 0.0110, 0.0175,\n",
      "        0.0084, 0.0357, 0.0123, 0.0416, 0.0035, 0.0089, 0.0072, 0.0055, 0.0044,\n",
      "        0.0117, 0.0412, 0.0237, 0.0076, 0.0409, 0.0070, 0.0081, 0.0143, 0.0102,\n",
      "        0.0187, 0.0466, 0.0275, 0.0214, 0.0074, 0.0045, 0.0065, 0.0153, 0.0300,\n",
      "        0.0118, 0.0204, 0.0295, 0.0130, 0.0056, 0.0037, 0.0039, 0.0494, 0.0462,\n",
      "        0.0313, 0.0141, 0.0374, 0.0141, 0.0498], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 59: BSNE_Loss=0.1773,local_loss: 0.17733873696023394,global_loss: 0.1717144250869751\n",
      "Q: tensor([0.0089, 0.0111, 0.0369, 0.0171, 0.0130, 0.0047, 0.0104, 0.0057, 0.0275,\n",
      "        0.0597, 0.0259, 0.0076, 0.0143, 0.0127, 0.0056, 0.0200, 0.0282, 0.0068,\n",
      "        0.0873, 0.0093, 0.0188, 0.0324, 0.0316, 0.0046, 0.0115, 0.0143, 0.0168,\n",
      "        0.0293, 0.0050, 0.0085, 0.0215, 0.0054, 0.0208, 0.0157, 0.0060, 0.1316,\n",
      "        0.0176, 0.0166, 0.0072, 0.0162, 0.0212, 0.0075, 0.0163, 0.0162, 0.0144,\n",
      "        0.0158, 0.0341, 0.0137, 0.0098, 0.0069], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 60: BSNE_Loss=0.1937,local_loss: 0.19372958535776724,global_loss: 0.14479263126850128\n",
      "Q: tensor([0.0285, 0.0192, 0.0101, 0.0391, 0.0826, 0.0139, 0.0104, 0.0142, 0.0175,\n",
      "        0.0047, 0.0140, 0.0327, 0.0270, 0.0165, 0.0141, 0.0101, 0.0282, 0.0187,\n",
      "        0.0256, 0.0194, 0.0336, 0.0284, 0.0169, 0.0065, 0.0119, 0.0148, 0.0165,\n",
      "        0.0244, 0.0025, 0.0218, 0.0337, 0.0170, 0.0199, 0.0194, 0.0202, 0.0152,\n",
      "        0.0136, 0.0281, 0.0088, 0.0079, 0.0443, 0.0080, 0.0245, 0.0354, 0.0076,\n",
      "        0.0106, 0.0186, 0.0294, 0.0031, 0.0112], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 61: BSNE_Loss=0.0763,local_loss: 0.07632963318491191,global_loss: 0.5567541718482971\n",
      "Q: tensor([0.0063, 0.0117, 0.0159, 0.0297, 0.0109, 0.0091, 0.0098, 0.0159, 0.0204,\n",
      "        0.0069, 0.0224, 0.0583, 0.0179, 0.0104, 0.0035, 0.0173, 0.0100, 0.0281,\n",
      "        0.0114, 0.0028, 0.0072, 0.0045, 0.0129, 0.0368, 0.0266, 0.0077, 0.0743,\n",
      "        0.0052, 0.0308, 0.0099, 0.0301, 0.0171, 0.0120, 0.0253, 0.0651, 0.0052,\n",
      "        0.0462, 0.0115, 0.0532, 0.0097, 0.0290, 0.0050, 0.0113, 0.0382, 0.0283,\n",
      "        0.0095, 0.0136, 0.0047, 0.0062, 0.0441], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 62: BSNE_Loss=0.1996,local_loss: 0.19961005738345583,global_loss: 0.4566343128681183\n",
      "Q: tensor([0.0917, 0.0161, 0.0214, 0.0144, 0.0204, 0.0123, 0.0159, 0.0137, 0.0227,\n",
      "        0.0055, 0.0086, 0.0243, 0.0182, 0.0084, 0.0054, 0.0047, 0.0372, 0.0639,\n",
      "        0.0075, 0.0238, 0.0298, 0.0068, 0.0114, 0.0183, 0.0329, 0.0028, 0.0040,\n",
      "        0.0049, 0.0183, 0.0149, 0.0834, 0.0153, 0.0118, 0.0112, 0.0591, 0.0268,\n",
      "        0.0140, 0.0188, 0.0116, 0.0355, 0.0077, 0.0116, 0.0117, 0.0092, 0.0125,\n",
      "        0.0053, 0.0135, 0.0101, 0.0391, 0.0116], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 63: BSNE_Loss=0.1903,local_loss: 0.19026907193916265,global_loss: 0.05744237080216408\n",
      "Q: tensor([0.0234, 0.0284, 0.0099, 0.0133, 0.0099, 0.0219, 0.0068, 0.0198, 0.0147,\n",
      "        0.0353, 0.0386, 0.0061, 0.0095, 0.0122, 0.0059, 0.0148, 0.0089, 0.0211,\n",
      "        0.0358, 0.0049, 0.0268, 0.0049, 0.0394, 0.1070, 0.0176, 0.0319, 0.0069,\n",
      "        0.0072, 0.0218, 0.0408, 0.0072, 0.0388, 0.0115, 0.0151, 0.0189, 0.0123,\n",
      "        0.0408, 0.0198, 0.0315, 0.0105, 0.0238, 0.0135, 0.0149, 0.0134, 0.0073,\n",
      "        0.0072, 0.0126, 0.0058, 0.0349, 0.0147], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 64: BSNE_Loss=0.1292,local_loss: 0.12924431372959355,global_loss: 0.11976514011621475\n",
      "Q: tensor([0.0296, 0.0224, 0.0085, 0.0123, 0.0331, 0.0174, 0.0136, 0.0046, 0.0070,\n",
      "        0.0327, 0.0129, 0.0085, 0.0063, 0.0221, 0.0196, 0.0139, 0.0674, 0.0139,\n",
      "        0.0049, 0.0367, 0.0339, 0.0182, 0.0083, 0.0116, 0.0179, 0.0265, 0.0110,\n",
      "        0.0477, 0.0117, 0.0071, 0.0150, 0.0219, 0.0180, 0.0147, 0.0035, 0.0069,\n",
      "        0.0101, 0.0228, 0.0073, 0.0184, 0.0652, 0.0040, 0.0150, 0.0418, 0.0045,\n",
      "        0.0088, 0.0124, 0.0273, 0.0952, 0.0061], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 65: BSNE_Loss=0.1814,local_loss: 0.18142419342694419,global_loss: 0.1527789682149887\n",
      "Q: tensor([0.0121, 0.0325, 0.0025, 0.0114, 0.0144, 0.0285, 0.0491, 0.0241, 0.0127,\n",
      "        0.0179, 0.0061, 0.0343, 0.0133, 0.0155, 0.0114, 0.0072, 0.0183, 0.0144,\n",
      "        0.0480, 0.0172, 0.0340, 0.0109, 0.0590, 0.0042, 0.0141, 0.0483, 0.0057,\n",
      "        0.0151, 0.0090, 0.0297, 0.0033, 0.0236, 0.0127, 0.0148, 0.0091, 0.0327,\n",
      "        0.0107, 0.0155, 0.0033, 0.0265, 0.0522, 0.0177, 0.0115, 0.0098, 0.0282,\n",
      "        0.0061, 0.0256, 0.0305, 0.0124, 0.0327], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 66: BSNE_Loss=0.1308,local_loss: 0.1308039797668533,global_loss: 0.16180527210235596\n",
      "Q: tensor([0.0059, 0.0077, 0.0186, 0.0042, 0.0044, 0.0510, 0.0263, 0.0263, 0.0226,\n",
      "        0.0231, 0.0148, 0.0149, 0.0632, 0.0368, 0.0141, 0.0077, 0.0369, 0.0350,\n",
      "        0.0128, 0.0142, 0.0098, 0.0787, 0.0209, 0.0070, 0.0127, 0.0054, 0.0162,\n",
      "        0.0099, 0.0104, 0.0100, 0.0259, 0.0308, 0.0052, 0.0171, 0.0096, 0.0076,\n",
      "        0.0134, 0.0209, 0.0578, 0.0021, 0.0283, 0.0084, 0.0100, 0.0270, 0.0240,\n",
      "        0.0046, 0.0160, 0.0224, 0.0329, 0.0143], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 67: BSNE_Loss=0.1622,local_loss: 0.16216021064960404,global_loss: 0.47952914237976074\n",
      "Q: tensor([0.0215, 0.0110, 0.0241, 0.0312, 0.0342, 0.0182, 0.0073, 0.0079, 0.0156,\n",
      "        0.0215, 0.0186, 0.0386, 0.0237, 0.0235, 0.0118, 0.0362, 0.0138, 0.0199,\n",
      "        0.0124, 0.0099, 0.0175, 0.0102, 0.0083, 0.0765, 0.0079, 0.0062, 0.0213,\n",
      "        0.0169, 0.0087, 0.0044, 0.0182, 0.0147, 0.0367, 0.0130, 0.0129, 0.0528,\n",
      "        0.0090, 0.0190, 0.0291, 0.0102, 0.0219, 0.0027, 0.0183, 0.0245, 0.0030,\n",
      "        0.0746, 0.0157, 0.0068, 0.0165, 0.0218], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 68: BSNE_Loss=0.1180,local_loss: 0.11797734381808253,global_loss: 0.5195845365524292\n",
      "Q: tensor([0.0210, 0.0073, 0.0198, 0.0171, 0.0216, 0.0298, 0.0256, 0.0079, 0.0039,\n",
      "        0.0045, 0.0091, 0.0247, 0.0074, 0.0259, 0.0065, 0.0576, 0.0173, 0.0117,\n",
      "        0.0104, 0.0938, 0.0235, 0.0460, 0.0087, 0.0048, 0.0431, 0.0116, 0.0081,\n",
      "        0.0596, 0.0097, 0.0336, 0.0215, 0.0147, 0.0034, 0.0223, 0.0188, 0.0554,\n",
      "        0.0143, 0.0117, 0.0046, 0.0370, 0.0209, 0.0030, 0.0011, 0.0053, 0.0098,\n",
      "        0.0189, 0.0136, 0.0056, 0.0403, 0.0068], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 69: BSNE_Loss=0.2499,local_loss: 0.24988173200193017,global_loss: 0.1189027652144432\n",
      "Q: tensor([0.0187, 0.0036, 0.0763, 0.0069, 0.0102, 0.0270, 0.0056, 0.0122, 0.0083,\n",
      "        0.0193, 0.0421, 0.0487, 0.0092, 0.0119, 0.0113, 0.0102, 0.0222, 0.0126,\n",
      "        0.0064, 0.0166, 0.0888, 0.0069, 0.0042, 0.0205, 0.0060, 0.0241, 0.0157,\n",
      "        0.0175, 0.0214, 0.0461, 0.0069, 0.0080, 0.0390, 0.0220, 0.0136, 0.0193,\n",
      "        0.0055, 0.0176, 0.0048, 0.0109, 0.0037, 0.0051, 0.0150, 0.0111, 0.0104,\n",
      "        0.0076, 0.0226, 0.0667, 0.0270, 0.0529], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 70: BSNE_Loss=0.2205,local_loss: 0.22054324813799384,global_loss: 0.38857460021972656\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Q: tensor([0.0218, 0.0152, 0.0246, 0.0215, 0.0081, 0.0148, 0.0215, 0.0134, 0.0236,\n",
      "        0.0343, 0.0316, 0.0329, 0.0246, 0.0108, 0.0107, 0.0645, 0.0117, 0.0159,\n",
      "        0.0105, 0.0065, 0.0242, 0.0148, 0.0165, 0.0257, 0.0085, 0.0267, 0.0148,\n",
      "        0.0183, 0.0562, 0.0067, 0.0215, 0.0233, 0.0165, 0.0113, 0.0084, 0.0184,\n",
      "        0.0260, 0.0201, 0.0124, 0.0190, 0.0291, 0.0128, 0.0099, 0.0113, 0.0201,\n",
      "        0.0330, 0.0424, 0.0150, 0.0101, 0.0083], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 71: BSNE_Loss=0.0227,local_loss: 0.02268631158304327,global_loss: 0.37766513228416443\n",
      "Center node 7312 finished at step 72 with loss 0.0227\n",
      "\n",
      "=== Pretraining Epoch 4 (Center Node: 6714) ===\n",
      "Q: tensor([0.0295, 0.0121, 0.0419, 0.0049, 0.0163, 0.0152, 0.0184, 0.0448, 0.0098,\n",
      "        0.0061, 0.0353, 0.0154, 0.0096, 0.0054, 0.0163, 0.0072, 0.0049, 0.0069,\n",
      "        0.0143, 0.0098, 0.0164, 0.0062, 0.0398, 0.0303, 0.0218, 0.0103, 0.0185,\n",
      "        0.0209, 0.0041, 0.0339, 0.0455, 0.0060, 0.0328, 0.0257, 0.0898, 0.0717,\n",
      "        0.0150, 0.0073, 0.0062, 0.0214, 0.0234, 0.0060, 0.0137, 0.0137, 0.0200,\n",
      "        0.0203, 0.0067, 0.0207, 0.0088, 0.0185], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 0: BSNE_Loss=0.1912,local_loss: 0.1912402470143811,global_loss: 0.3590702712535858\n",
      "Q: tensor([0.0252, 0.0110, 0.0301, 0.0087, 0.0165, 0.0046, 0.0437, 0.0360, 0.0185,\n",
      "        0.0479, 0.0281, 0.0145, 0.0270, 0.0054, 0.0224, 0.0288, 0.0126, 0.0239,\n",
      "        0.0124, 0.0090, 0.0204, 0.0317, 0.0075, 0.0076, 0.0215, 0.0255, 0.0219,\n",
      "        0.0072, 0.0099, 0.0057, 0.0111, 0.0233, 0.0242, 0.0384, 0.0102, 0.0411,\n",
      "        0.0442, 0.0175, 0.0237, 0.0087, 0.0314, 0.0096, 0.0079, 0.0193, 0.0085,\n",
      "        0.0118, 0.0275, 0.0127, 0.0159, 0.0279], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 1: BSNE_Loss=0.0874,local_loss: 0.08741096675243804,global_loss: 0.5265357494354248\n",
      "Q: tensor([0.0072, 0.0411, 0.0990, 0.0398, 0.0473, 0.0499, 0.0265, 0.0131, 0.0106,\n",
      "        0.0066, 0.0202, 0.0118, 0.0292, 0.0038, 0.0114, 0.0196, 0.0057, 0.0049,\n",
      "        0.0069, 0.0141, 0.0172, 0.0197, 0.0130, 0.0077, 0.0043, 0.0131, 0.0110,\n",
      "        0.0219, 0.0281, 0.0730, 0.0171, 0.0073, 0.0133, 0.0037, 0.0100, 0.0031,\n",
      "        0.0089, 0.0520, 0.0641, 0.0273, 0.0084, 0.0201, 0.0197, 0.0171, 0.0131,\n",
      "        0.0048, 0.0067, 0.0098, 0.0078, 0.0081], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 2: BSNE_Loss=0.2687,local_loss: 0.26873847916350957,global_loss: 0.18816570937633514\n",
      "Q: tensor([0.0304, 0.0083, 0.0119, 0.0094, 0.0063, 0.0045, 0.0203, 0.0153, 0.0057,\n",
      "        0.0103, 0.0089, 0.0097, 0.0161, 0.0073, 0.0164, 0.0330, 0.0106, 0.0061,\n",
      "        0.0117, 0.0127, 0.0114, 0.0115, 0.0066, 0.0114, 0.0516, 0.0087, 0.0119,\n",
      "        0.0085, 0.0222, 0.0967, 0.0526, 0.0311, 0.0150, 0.0068, 0.0125, 0.0206,\n",
      "        0.0172, 0.0411, 0.0209, 0.0240, 0.0357, 0.0135, 0.0323, 0.0048, 0.0356,\n",
      "        0.0362, 0.0171, 0.0115, 0.0081, 0.0683], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 3: BSNE_Loss=0.1907,local_loss: 0.19074639180781042,global_loss: 0.5354300737380981\n",
      "Q: tensor([0.0199, 0.0090, 0.0223, 0.0206, 0.0323, 0.0084, 0.0045, 0.0231, 0.0815,\n",
      "        0.0265, 0.0300, 0.0148, 0.0023, 0.0112, 0.0274, 0.0043, 0.0088, 0.0102,\n",
      "        0.0069, 0.0463, 0.0085, 0.0235, 0.0198, 0.0080, 0.0155, 0.0098, 0.0376,\n",
      "        0.0110, 0.0153, 0.0356, 0.0466, 0.0078, 0.0045, 0.0635, 0.0083, 0.0053,\n",
      "        0.0182, 0.0220, 0.0355, 0.0120, 0.0128, 0.0209, 0.0169, 0.0074, 0.0208,\n",
      "        0.0519, 0.0078, 0.0238, 0.0092, 0.0099], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 4: BSNE_Loss=0.1937,local_loss: 0.19372667746353073,global_loss: 0.31133562326431274\n",
      "Q: tensor([0.0847, 0.0766, 0.0412, 0.0282, 0.0130, 0.0405, 0.0197, 0.0219, 0.0065,\n",
      "        0.0163, 0.0215, 0.0038, 0.0067, 0.0275, 0.0202, 0.0135, 0.0305, 0.0101,\n",
      "        0.0217, 0.0195, 0.0287, 0.0181, 0.0207, 0.0117, 0.0109, 0.0092, 0.0110,\n",
      "        0.0074, 0.0047, 0.0062, 0.0189, 0.0141, 0.0207, 0.0058, 0.0298, 0.0047,\n",
      "        0.0166, 0.0275, 0.0185, 0.0213, 0.0309, 0.0101, 0.0211, 0.0098, 0.0035,\n",
      "        0.0113, 0.0081, 0.0143, 0.0475, 0.0134], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 5: BSNE_Loss=0.1639,local_loss: 0.16385420614418364,global_loss: 0.3361993730068207\n",
      "Q: tensor([0.0090, 0.0079, 0.0075, 0.0520, 0.0122, 0.0174, 0.0279, 0.0101, 0.0311,\n",
      "        0.0134, 0.0167, 0.0170, 0.0228, 0.0146, 0.0140, 0.0181, 0.0084, 0.0060,\n",
      "        0.0094, 0.0049, 0.0149, 0.0051, 0.0165, 0.0306, 0.0108, 0.0137, 0.0150,\n",
      "        0.0216, 0.0072, 0.0345, 0.0245, 0.0039, 0.0111, 0.0583, 0.0313, 0.0064,\n",
      "        0.0108, 0.0316, 0.0137, 0.0364, 0.0133, 0.0156, 0.0348, 0.0102, 0.0283,\n",
      "        0.1030, 0.0310, 0.0112, 0.0232, 0.0111], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 6: BSNE_Loss=0.1544,local_loss: 0.15435577581458146,global_loss: 0.4326232373714447\n",
      "Q: tensor([0.1545, 0.0575, 0.0213, 0.0079, 0.0071, 0.0094, 0.0082, 0.0030, 0.0396,\n",
      "        0.0176, 0.0072, 0.0275, 0.0162, 0.0087, 0.0395, 0.0103, 0.0168, 0.0179,\n",
      "        0.0076, 0.0061, 0.0118, 0.0207, 0.0052, 0.0040, 0.0112, 0.0397, 0.0055,\n",
      "        0.0131, 0.0087, 0.0096, 0.0264, 0.0038, 0.0056, 0.0362, 0.0140, 0.0248,\n",
      "        0.0104, 0.0785, 0.0088, 0.0019, 0.0122, 0.0082, 0.0382, 0.0067, 0.0123,\n",
      "        0.0344, 0.0272, 0.0097, 0.0083, 0.0192], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 7: BSNE_Loss=0.3225,local_loss: 0.32245437374238534,global_loss: 0.4333500862121582\n",
      "Q: tensor([0.0193, 0.0327, 0.0341, 0.0739, 0.0126, 0.0159, 0.0510, 0.0184, 0.0098,\n",
      "        0.0115, 0.0279, 0.0100, 0.0065, 0.0184, 0.0102, 0.0159, 0.0185, 0.0110,\n",
      "        0.0131, 0.0143, 0.0105, 0.0261, 0.0053, 0.0118, 0.0090, 0.0169, 0.0071,\n",
      "        0.0374, 0.0275, 0.0047, 0.0053, 0.0209, 0.0077, 0.0161, 0.0296, 0.0060,\n",
      "        0.0259, 0.0044, 0.0086, 0.0043, 0.0037, 0.0047, 0.0111, 0.0476, 0.1345,\n",
      "        0.0152, 0.0146, 0.0058, 0.0099, 0.0428], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 8: BSNE_Loss=0.2564,local_loss: 0.2563917244116952,global_loss: 0.6640487909317017\n",
      "Q: tensor([0.0393, 0.0165, 0.0164, 0.0147, 0.0189, 0.0121, 0.0363, 0.0105, 0.0061,\n",
      "        0.0087, 0.0138, 0.0058, 0.0257, 0.0154, 0.0175, 0.0246, 0.0275, 0.0242,\n",
      "        0.0236, 0.0185, 0.0132, 0.0130, 0.0242, 0.0095, 0.0154, 0.0170, 0.0103,\n",
      "        0.0155, 0.0056, 0.0757, 0.0178, 0.0184, 0.0025, 0.0072, 0.0257, 0.0128,\n",
      "        0.0260, 0.0272, 0.0111, 0.0152, 0.0759, 0.0070, 0.0676, 0.0220, 0.0035,\n",
      "        0.0215, 0.0299, 0.0093, 0.0124, 0.0115], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 13: BSNE_Loss=0.1456,local_loss: 0.1455511918008019,global_loss: 0.14262031018733978\n",
      "Q: tensor([0.0293, 0.0045, 0.0257, 0.0109, 0.0372, 0.0303, 0.0082, 0.0166, 0.0090,\n",
      "        0.0091, 0.0845, 0.0409, 0.0082, 0.0135, 0.0156, 0.0143, 0.0250, 0.0272,\n",
      "        0.0582, 0.0070, 0.0156, 0.0079, 0.0256, 0.0137, 0.0066, 0.0278, 0.0099,\n",
      "        0.0136, 0.0077, 0.0456, 0.0288, 0.0322, 0.0110, 0.0097, 0.0126, 0.0164,\n",
      "        0.0133, 0.0084, 0.0080, 0.0249, 0.0412, 0.0094, 0.0350, 0.0111, 0.0221,\n",
      "        0.0108, 0.0119, 0.0155, 0.0125, 0.0158], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 14: BSNE_Loss=0.1292,local_loss: 0.12917586194861275,global_loss: 0.4404694437980652\n",
      "Q: tensor([0.0041, 0.0338, 0.0224, 0.0226, 0.0052, 0.0251, 0.0102, 0.0340, 0.0195,\n",
      "        0.0099, 0.0274, 0.0117, 0.0114, 0.0296, 0.0098, 0.0341, 0.0343, 0.0244,\n",
      "        0.0186, 0.0208, 0.0164, 0.0223, 0.0114, 0.0078, 0.0159, 0.0033, 0.0083,\n",
      "        0.0010, 0.0231, 0.0287, 0.0489, 0.0051, 0.0059, 0.0112, 0.0175, 0.0465,\n",
      "        0.0285, 0.0492, 0.0083, 0.0231, 0.0489, 0.0210, 0.0163, 0.0055, 0.0113,\n",
      "        0.0242, 0.0161, 0.0073, 0.0188, 0.0390], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 15: BSNE_Loss=0.1590,local_loss: 0.1589893956670787,global_loss: 0.6228703856468201\n",
      "Q: tensor([0.0050, 0.0154, 0.0197, 0.0105, 0.0168, 0.0195, 0.0134, 0.0148, 0.0199,\n",
      "        0.0124, 0.0085, 0.0156, 0.0170, 0.0236, 0.0429, 0.0132, 0.0086, 0.0236,\n",
      "        0.0260, 0.0112, 0.0253, 0.0104, 0.0478, 0.0346, 0.0705, 0.0165, 0.0102,\n",
      "        0.0505, 0.0048, 0.0103, 0.0421, 0.0021, 0.0129, 0.0149, 0.0465, 0.0054,\n",
      "        0.0208, 0.0142, 0.0140, 0.0049, 0.0518, 0.0402, 0.0124, 0.0075, 0.0077,\n",
      "        0.0150, 0.0311, 0.0163, 0.0108, 0.0109], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 16: BSNE_Loss=0.1554,local_loss: 0.15542399525008654,global_loss: 0.5505447387695312\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Q: tensor([0.0116, 0.0137, 0.0096, 0.0163, 0.0127, 0.0075, 0.0079, 0.0078, 0.0412,\n",
      "        0.0216, 0.0537, 0.0388, 0.0077, 0.0152, 0.0195, 0.0125, 0.0121, 0.0092,\n",
      "        0.0417, 0.0114, 0.0254, 0.0111, 0.0110, 0.0350, 0.0239, 0.0184, 0.0079,\n",
      "        0.0334, 0.0171, 0.0075, 0.0104, 0.0090, 0.0030, 0.0804, 0.0206, 0.0067,\n",
      "        0.0096, 0.0264, 0.0166, 0.0273, 0.0189, 0.0098, 0.0121, 0.0827, 0.0178,\n",
      "        0.0230, 0.0236, 0.0277, 0.0081, 0.0039], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 17: BSNE_Loss=0.1672,local_loss: 0.1671810335310321,global_loss: 0.3197144567966461\n",
      "Q: tensor([0.0225, 0.0116, 0.0117, 0.0188, 0.0126, 0.0562, 0.0392, 0.0088, 0.0062,\n",
      "        0.0104, 0.0428, 0.0065, 0.0159, 0.0061, 0.0145, 0.0084, 0.0106, 0.0061,\n",
      "        0.0187, 0.0061, 0.0240, 0.0182, 0.0091, 0.0112, 0.0299, 0.0678, 0.0096,\n",
      "        0.0240, 0.0081, 0.0081, 0.0067, 0.0082, 0.0166, 0.0429, 0.0236, 0.0200,\n",
      "        0.0154, 0.0297, 0.0079, 0.0136, 0.0280, 0.0669, 0.0186, 0.0159, 0.0090,\n",
      "        0.0268, 0.0133, 0.0194, 0.0370, 0.0368], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 18: BSNE_Loss=0.1427,local_loss: 0.1427116534040689,global_loss: 0.6186363101005554\n",
      "Q: tensor([0.0093, 0.0090, 0.0635, 0.0037, 0.0200, 0.0074, 0.0176, 0.0227, 0.0363,\n",
      "        0.0247, 0.0061, 0.0199, 0.0059, 0.0069, 0.0138, 0.0266, 0.0168, 0.0166,\n",
      "        0.0421, 0.0283, 0.0552, 0.0234, 0.0143, 0.0099, 0.0219, 0.0193, 0.0117,\n",
      "        0.0099, 0.0150, 0.0129, 0.0379, 0.0051, 0.0219, 0.0276, 0.0095, 0.0216,\n",
      "        0.0117, 0.0194, 0.0109, 0.0210, 0.0043, 0.0084, 0.0129, 0.0625, 0.0161,\n",
      "        0.0354, 0.0271, 0.0276, 0.0143, 0.0142], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 19: BSNE_Loss=0.1230,local_loss: 0.12303778474547597,global_loss: 0.6827791333198547\n",
      "Q: tensor([0.0105, 0.0406, 0.0360, 0.0099, 0.0064, 0.0137, 0.0088, 0.0109, 0.0152,\n",
      "        0.0127, 0.0067, 0.0102, 0.0121, 0.0042, 0.0045, 0.0030, 0.0042, 0.0025,\n",
      "        0.0144, 0.1567, 0.0029, 0.0517, 0.0298, 0.0356, 0.0164, 0.0191, 0.0052,\n",
      "        0.0229, 0.0163, 0.0181, 0.0062, 0.0063, 0.0133, 0.0229, 0.0298, 0.0234,\n",
      "        0.0225, 0.0286, 0.0151, 0.0404, 0.0124, 0.0161, 0.0096, 0.0176, 0.0123,\n",
      "        0.0322, 0.0449, 0.0181, 0.0232, 0.0039], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 20: BSNE_Loss=0.2770,local_loss: 0.2769923662347101,global_loss: 0.38409358263015747\n",
      "Q: tensor([0.0174, 0.0090, 0.1069, 0.0092, 0.0244, 0.0105, 0.0102, 0.0247, 0.0272,\n",
      "        0.0148, 0.0363, 0.0484, 0.0097, 0.0116, 0.0201, 0.0030, 0.0140, 0.0263,\n",
      "        0.0301, 0.0141, 0.0148, 0.0162, 0.0366, 0.0231, 0.0100, 0.0272, 0.0934,\n",
      "        0.0483, 0.0134, 0.0149, 0.0110, 0.0176, 0.0074, 0.0035, 0.0088, 0.0177,\n",
      "        0.0294, 0.0179, 0.0256, 0.0091, 0.0077, 0.0157, 0.0025, 0.0086, 0.0051,\n",
      "        0.0062, 0.0040, 0.0248, 0.0058, 0.0057], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 21: BSNE_Loss=0.2396,local_loss: 0.23962308704366958,global_loss: 0.6004897952079773\n",
      "Q: tensor([0.0582, 0.0100, 0.0304, 0.0115, 0.0078, 0.0183, 0.0098, 0.0107, 0.0184,\n",
      "        0.0472, 0.0099, 0.0181, 0.0508, 0.0421, 0.0329, 0.0061, 0.0107, 0.0099,\n",
      "        0.0275, 0.0123, 0.0170, 0.0094, 0.0130, 0.0175, 0.0199, 0.0180, 0.0279,\n",
      "        0.0105, 0.0065, 0.0229, 0.0080, 0.0288, 0.0183, 0.0260, 0.0140, 0.0168,\n",
      "        0.0094, 0.0336, 0.0082, 0.0235, 0.0527, 0.0159, 0.0254, 0.0174, 0.0332,\n",
      "        0.0199, 0.0144, 0.0063, 0.0090, 0.0141], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 22: BSNE_Loss=0.0894,local_loss: 0.08936465323159866,global_loss: 0.24573643505573273\n",
      "Q: tensor([0.0104, 0.0121, 0.0026, 0.0177, 0.0090, 0.0188, 0.0224, 0.0265, 0.0211,\n",
      "        0.0243, 0.0094, 0.0360, 0.0109, 0.0454, 0.0676, 0.0168, 0.0096, 0.0268,\n",
      "        0.0242, 0.0044, 0.0252, 0.0024, 0.0221, 0.0128, 0.0275, 0.0243, 0.0305,\n",
      "        0.0188, 0.0048, 0.0370, 0.0038, 0.0132, 0.0126, 0.0093, 0.0111, 0.0124,\n",
      "        0.0166, 0.0252, 0.0073, 0.0117, 0.0417, 0.0140, 0.0135, 0.0115, 0.0640,\n",
      "        0.0093, 0.0411, 0.0128, 0.0074, 0.0399], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 23: BSNE_Loss=0.1630,local_loss: 0.16298308576870066,global_loss: 0.4244086444377899\n",
      "Q: tensor([0.0084, 0.0035, 0.0138, 0.0110, 0.0160, 0.0587, 0.0203, 0.0222, 0.0142,\n",
      "        0.0281, 0.0196, 0.0078, 0.0322, 0.0102, 0.0082, 0.0100, 0.0132, 0.0119,\n",
      "        0.0101, 0.0150, 0.0147, 0.0125, 0.0191, 0.0076, 0.0091, 0.0047, 0.0393,\n",
      "        0.0105, 0.0380, 0.0130, 0.0402, 0.0101, 0.0203, 0.0485, 0.0090, 0.0149,\n",
      "        0.0128, 0.0194, 0.0067, 0.0156, 0.0083, 0.0101, 0.0393, 0.1171, 0.0266,\n",
      "        0.0437, 0.0138, 0.0222, 0.0093, 0.0090], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 24: BSNE_Loss=0.1726,local_loss: 0.172566071551619,global_loss: 0.547863245010376\n",
      "Q: tensor([0.0267, 0.0337, 0.0099, 0.0237, 0.0105, 0.0170, 0.0082, 0.0097, 0.0144,\n",
      "        0.0519, 0.0367, 0.0208, 0.0101, 0.0180, 0.0272, 0.0066, 0.0044, 0.0094,\n",
      "        0.0064, 0.0080, 0.0319, 0.0355, 0.0153, 0.0442, 0.0130, 0.0375, 0.0118,\n",
      "        0.0205, 0.0400, 0.0282, 0.0107, 0.0072, 0.0157, 0.0102, 0.0026, 0.0247,\n",
      "        0.0195, 0.1147, 0.0046, 0.0458, 0.0117, 0.0089, 0.0055, 0.0189, 0.0039,\n",
      "        0.0080, 0.0085, 0.0147, 0.0099, 0.0229], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 25: BSNE_Loss=0.2103,local_loss: 0.21031868530342826,global_loss: 0.3723980486392975\n",
      "Q: tensor([0.0119, 0.0198, 0.0254, 0.0230, 0.0099, 0.0115, 0.0110, 0.0424, 0.0110,\n",
      "        0.0310, 0.0132, 0.0144, 0.0153, 0.0180, 0.0108, 0.0300, 0.0073, 0.0055,\n",
      "        0.0059, 0.0099, 0.0043, 0.0132, 0.0201, 0.0229, 0.0419, 0.0203, 0.0122,\n",
      "        0.0066, 0.0138, 0.0328, 0.0333, 0.0372, 0.0203, 0.0029, 0.0051, 0.0214,\n",
      "        0.0226, 0.0052, 0.0274, 0.0328, 0.0220, 0.1273, 0.0197, 0.0024, 0.0104,\n",
      "        0.0127, 0.0032, 0.0126, 0.0211, 0.0449], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 26: BSNE_Loss=0.2082,local_loss: 0.2082453110197101,global_loss: 0.6172283887863159\n",
      "Q: tensor([0.0063, 0.0815, 0.0139, 0.0084, 0.0103, 0.0093, 0.0146, 0.0122, 0.0038,\n",
      "        0.0604, 0.0176, 0.0404, 0.0128, 0.0044, 0.0173, 0.0037, 0.0064, 0.0085,\n",
      "        0.0366, 0.0143, 0.0074, 0.0115, 0.0285, 0.0083, 0.0092, 0.0125, 0.0061,\n",
      "        0.0031, 0.1245, 0.0107, 0.0057, 0.0255, 0.0164, 0.0115, 0.0302, 0.0059,\n",
      "        0.0222, 0.0115, 0.0072, 0.0411, 0.0139, 0.0397, 0.0075, 0.0594, 0.0214,\n",
      "        0.0094, 0.0170, 0.0062, 0.0384, 0.0055], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 27: BSNE_Loss=0.3041,local_loss: 0.30407295751252644,global_loss: 0.11073422431945801\n",
      "Q: tensor([0.0081, 0.0069, 0.0458, 0.0043, 0.0274, 0.0118, 0.0098, 0.0639, 0.0216,\n",
      "        0.0072, 0.0081, 0.0208, 0.0045, 0.0067, 0.0177, 0.0183, 0.0217, 0.0157,\n",
      "        0.0142, 0.0339, 0.0122, 0.0094, 0.0049, 0.0293, 0.0137, 0.0094, 0.0168,\n",
      "        0.0092, 0.0470, 0.0238, 0.0116, 0.0264, 0.0073, 0.0154, 0.0426, 0.0128,\n",
      "        0.0092, 0.0173, 0.0333, 0.0061, 0.0096, 0.0986, 0.0223, 0.0063, 0.0422,\n",
      "        0.0211, 0.0146, 0.0261, 0.0241, 0.0091], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 28: BSNE_Loss=0.1790,local_loss: 0.17896321269572835,global_loss: 0.399844765663147\n",
      "Q: tensor([0.0115, 0.0104, 0.0201, 0.0484, 0.0156, 0.0738, 0.0135, 0.0150, 0.0128,\n",
      "        0.0246, 0.0167, 0.0101, 0.0328, 0.0199, 0.0097, 0.0038, 0.0751, 0.0110,\n",
      "        0.0082, 0.0416, 0.0073, 0.0198, 0.0288, 0.0160, 0.0068, 0.0141, 0.0180,\n",
      "        0.0083, 0.0349, 0.0144, 0.0091, 0.0246, 0.0112, 0.0205, 0.0046, 0.0269,\n",
      "        0.0132, 0.0178, 0.0266, 0.0194, 0.0090, 0.0054, 0.0248, 0.0112, 0.0095,\n",
      "        0.0131, 0.0067, 0.0094, 0.0763, 0.0177], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 29: BSNE_Loss=0.1603,local_loss: 0.1603079426478404,global_loss: 0.6633093357086182\n",
      "Q: tensor([0.0050, 0.0298, 0.0172, 0.0080, 0.0115, 0.0221, 0.0086, 0.0074, 0.0351,\n",
      "        0.0053, 0.0375, 0.0075, 0.0392, 0.0202, 0.0266, 0.0491, 0.0126, 0.0146,\n",
      "        0.0077, 0.0175, 0.0509, 0.0417, 0.0107, 0.0187, 0.0224, 0.0070, 0.0120,\n",
      "        0.0341, 0.0051, 0.0147, 0.0691, 0.0078, 0.0110, 0.0185, 0.0227, 0.0063,\n",
      "        0.0331, 0.0180, 0.0160, 0.0036, 0.0078, 0.0108, 0.0045, 0.0054, 0.0450,\n",
      "        0.0110, 0.0190, 0.0141, 0.0542, 0.0224], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 30: BSNE_Loss=0.1842,local_loss: 0.18423297414938763,global_loss: 0.47894087433815\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Q: tensor([0.0130, 0.0417, 0.0083, 0.0272, 0.0069, 0.0149, 0.0363, 0.0054, 0.0107,\n",
      "        0.0566, 0.0212, 0.0278, 0.0420, 0.0035, 0.0105, 0.0179, 0.0089, 0.0073,\n",
      "        0.0164, 0.0340, 0.0129, 0.0147, 0.0584, 0.0154, 0.0220, 0.0021, 0.0065,\n",
      "        0.0095, 0.0251, 0.0271, 0.0214, 0.0076, 0.0169, 0.0563, 0.0468, 0.0321,\n",
      "        0.0422, 0.0084, 0.0090, 0.0105, 0.0169, 0.0127, 0.0157, 0.0048, 0.0042,\n",
      "        0.0069, 0.0488, 0.0183, 0.0122, 0.0044], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 31: BSNE_Loss=0.2073,local_loss: 0.20733304431752508,global_loss: 0.24505911767482758\n",
      "Q: tensor([0.0136, 0.0239, 0.0446, 0.0148, 0.0202, 0.0130, 0.0184, 0.0081, 0.0452,\n",
      "        0.0188, 0.0331, 0.0071, 0.0101, 0.0064, 0.0042, 0.0346, 0.0146, 0.0077,\n",
      "        0.0163, 0.0767, 0.0633, 0.0227, 0.0139, 0.0091, 0.0069, 0.0157, 0.0556,\n",
      "        0.0077, 0.0259, 0.0111, 0.0113, 0.0087, 0.0106, 0.0077, 0.0120, 0.0054,\n",
      "        0.0348, 0.0086, 0.0202, 0.0209, 0.0056, 0.0194, 0.0098, 0.0496, 0.0236,\n",
      "        0.0107, 0.0149, 0.0358, 0.0150, 0.0123], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 32: BSNE_Loss=0.1635,local_loss: 0.16350236072190527,global_loss: 0.7742270827293396\n",
      "Q: tensor([0.0213, 0.0182, 0.0203, 0.0299, 0.0202, 0.0076, 0.0337, 0.0268, 0.0082,\n",
      "        0.0423, 0.0128, 0.0188, 0.0131, 0.0303, 0.0093, 0.0115, 0.0113, 0.0088,\n",
      "        0.0621, 0.0064, 0.0231, 0.0253, 0.0066, 0.0212, 0.0254, 0.0069, 0.0118,\n",
      "        0.0025, 0.0074, 0.0167, 0.0160, 0.0042, 0.0238, 0.0108, 0.0187, 0.0071,\n",
      "        0.0172, 0.0503, 0.0166, 0.0094, 0.0142, 0.0221, 0.0673, 0.0359, 0.0048,\n",
      "        0.0419, 0.0082, 0.0071, 0.0504, 0.0141], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 33: BSNE_Loss=0.1620,local_loss: 0.16202476147012912,global_loss: 0.6297881603240967\n",
      "Q: tensor([0.0697, 0.0065, 0.0141, 0.0263, 0.0240, 0.0113, 0.0064, 0.0082, 0.0072,\n",
      "        0.0180, 0.0749, 0.0055, 0.0153, 0.0141, 0.0085, 0.0131, 0.0081, 0.0226,\n",
      "        0.0123, 0.0251, 0.0058, 0.0132, 0.0215, 0.0073, 0.0166, 0.0078, 0.0102,\n",
      "        0.0018, 0.0330, 0.0398, 0.0468, 0.0142, 0.0079, 0.0083, 0.0076, 0.0125,\n",
      "        0.0148, 0.0246, 0.0264, 0.0166, 0.0297, 0.0211, 0.0283, 0.0221, 0.0193,\n",
      "        0.0136, 0.0441, 0.0293, 0.0165, 0.0480], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 34: BSNE_Loss=0.1626,local_loss: 0.1626192676510955,global_loss: 0.533528208732605\n",
      "Q: tensor([0.0075, 0.0207, 0.0182, 0.0179, 0.0147, 0.0277, 0.0156, 0.0478, 0.0298,\n",
      "        0.0151, 0.0086, 0.0498, 0.0066, 0.0148, 0.0083, 0.0084, 0.0365, 0.0129,\n",
      "        0.0115, 0.0523, 0.0125, 0.0756, 0.0177, 0.0114, 0.0736, 0.0030, 0.0536,\n",
      "        0.0152, 0.0099, 0.0367, 0.0167, 0.0119, 0.0166, 0.0120, 0.0060, 0.0120,\n",
      "        0.0050, 0.0094, 0.0371, 0.0061, 0.0176, 0.0086, 0.0235, 0.0074, 0.0099,\n",
      "        0.0103, 0.0138, 0.0206, 0.0094, 0.0121], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 35: BSNE_Loss=0.1810,local_loss: 0.18101318564252616,global_loss: 0.5154457092285156\n",
      "Q: tensor([0.0372, 0.0228, 0.0122, 0.0047, 0.0201, 0.0169, 0.0297, 0.0065, 0.0094,\n",
      "        0.0490, 0.0066, 0.0194, 0.0512, 0.0243, 0.0170, 0.0126, 0.0234, 0.0114,\n",
      "        0.0351, 0.0059, 0.0049, 0.0062, 0.0108, 0.0367, 0.0168, 0.0155, 0.0183,\n",
      "        0.0096, 0.0072, 0.0092, 0.0192, 0.0226, 0.0024, 0.1093, 0.0217, 0.0112,\n",
      "        0.0099, 0.0265, 0.0166, 0.0080, 0.0031, 0.0089, 0.0124, 0.0059, 0.0612,\n",
      "        0.0189, 0.0062, 0.0296, 0.0162, 0.0395], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 36: BSNE_Loss=0.2201,local_loss: 0.22013329035293613,global_loss: 0.33120936155319214\n",
      "Q: tensor([0.0391, 0.0304, 0.0291, 0.0050, 0.0153, 0.0204, 0.0123, 0.0072, 0.0244,\n",
      "        0.0209, 0.0342, 0.0126, 0.0070, 0.0033, 0.0449, 0.0118, 0.0047, 0.0168,\n",
      "        0.0129, 0.0215, 0.0091, 0.0166, 0.0070, 0.0103, 0.0061, 0.0113, 0.0172,\n",
      "        0.0054, 0.0232, 0.0210, 0.0742, 0.0053, 0.0053, 0.0086, 0.0230, 0.0283,\n",
      "        0.0376, 0.0226, 0.0158, 0.0505, 0.0346, 0.0136, 0.0247, 0.0195, 0.0141,\n",
      "        0.0407, 0.0377, 0.0146, 0.0156, 0.0127], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 37: BSNE_Loss=0.1462,local_loss: 0.14617537625915802,global_loss: 0.37835073471069336\n",
      "Q: tensor([0.0069, 0.0194, 0.0108, 0.0085, 0.0438, 0.0571, 0.0272, 0.0180, 0.0065,\n",
      "        0.0117, 0.0047, 0.0475, 0.0100, 0.0128, 0.0115, 0.0130, 0.0057, 0.0079,\n",
      "        0.0167, 0.0372, 0.0070, 0.0107, 0.0167, 0.0169, 0.0263, 0.0055, 0.0708,\n",
      "        0.0084, 0.0254, 0.0293, 0.0047, 0.0254, 0.0193, 0.0402, 0.0178, 0.0236,\n",
      "        0.0238, 0.0213, 0.0104, 0.0246, 0.0221, 0.0125, 0.0050, 0.0607, 0.0430,\n",
      "        0.0109, 0.0124, 0.0027, 0.0110, 0.0149], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 38: BSNE_Loss=0.1802,local_loss: 0.18016014648227946,global_loss: 0.14293025434017181\n",
      "Q: tensor([0.0226, 0.0041, 0.0079, 0.0177, 0.0055, 0.0311, 0.0332, 0.0303, 0.0032,\n",
      "        0.0096, 0.0095, 0.0124, 0.0205, 0.0107, 0.0072, 0.0128, 0.0156, 0.0066,\n",
      "        0.0240, 0.0278, 0.0290, 0.0213, 0.0174, 0.0265, 0.0189, 0.0125, 0.0199,\n",
      "        0.0523, 0.0199, 0.0139, 0.0327, 0.0036, 0.0145, 0.0775, 0.0093, 0.0301,\n",
      "        0.0027, 0.0196, 0.0201, 0.0376, 0.0260, 0.0622, 0.0051, 0.0048, 0.0061,\n",
      "        0.0136, 0.0345, 0.0317, 0.0140, 0.0105], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 39: BSNE_Loss=0.1824,local_loss: 0.1823920912717916,global_loss: 0.437227338552475\n",
      "Q: tensor([0.0390, 0.0432, 0.0336, 0.0165, 0.0166, 0.0109, 0.0163, 0.0128, 0.0187,\n",
      "        0.0213, 0.0146, 0.0173, 0.0125, 0.0029, 0.0188, 0.0051, 0.0029, 0.0119,\n",
      "        0.0152, 0.0182, 0.0043, 0.0062, 0.0120, 0.0118, 0.0133, 0.0062, 0.0182,\n",
      "        0.0234, 0.0082, 0.0913, 0.0071, 0.0085, 0.0262, 0.0144, 0.0299, 0.0279,\n",
      "        0.0022, 0.0393, 0.0217, 0.0318, 0.0401, 0.0353, 0.0076, 0.0196, 0.0201,\n",
      "        0.0391, 0.0156, 0.0253, 0.0203, 0.0249], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 40: BSNE_Loss=0.1645,local_loss: 0.16452836993813613,global_loss: 0.8635373711585999\n",
      "Q: tensor([0.0469, 0.0227, 0.0388, 0.0487, 0.0060, 0.0256, 0.0090, 0.0141, 0.0212,\n",
      "        0.0150, 0.0056, 0.0331, 0.0080, 0.0626, 0.0118, 0.0037, 0.0266, 0.0322,\n",
      "        0.0105, 0.0107, 0.0153, 0.0142, 0.0142, 0.0038, 0.0196, 0.0234, 0.0426,\n",
      "        0.0072, 0.0112, 0.0177, 0.0173, 0.0057, 0.0102, 0.0279, 0.0050, 0.0047,\n",
      "        0.0491, 0.0304, 0.0361, 0.0180, 0.0084, 0.0360, 0.0148, 0.0187, 0.0266,\n",
      "        0.0125, 0.0118, 0.0108, 0.0223, 0.0118], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 41: BSNE_Loss=0.1488,local_loss: 0.14878051650669583,global_loss: 0.05180112272500992\n",
      "Q: tensor([0.0172, 0.0192, 0.0215, 0.0255, 0.0303, 0.0190, 0.0394, 0.0116, 0.0215,\n",
      "        0.0261, 0.0350, 0.0280, 0.0193, 0.0076, 0.0093, 0.0041, 0.0266, 0.0142,\n",
      "        0.0062, 0.0128, 0.0301, 0.0185, 0.0209, 0.0390, 0.0090, 0.0064, 0.0194,\n",
      "        0.0125, 0.0128, 0.0054, 0.0273, 0.0127, 0.0440, 0.0335, 0.0127, 0.0627,\n",
      "        0.0078, 0.0145, 0.0167, 0.0193, 0.0155, 0.0206, 0.0108, 0.0257, 0.0082,\n",
      "        0.0096, 0.0399, 0.0204, 0.0033, 0.0262], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 42: BSNE_Loss=0.0944,local_loss: 0.09442730683289118,global_loss: 0.36799368262290955\n",
      "Q: tensor([0.0060, 0.0357, 0.0150, 0.0252, 0.0266, 0.0119, 0.0461, 0.0107, 0.0187,\n",
      "        0.0078, 0.0259, 0.0052, 0.0224, 0.0276, 0.0085, 0.0064, 0.0155, 0.0028,\n",
      "        0.0200, 0.0119, 0.0146, 0.0116, 0.0094, 0.1010, 0.0075, 0.0130, 0.0115,\n",
      "        0.0132, 0.0308, 0.0373, 0.0188, 0.0045, 0.0141, 0.0047, 0.0103, 0.0109,\n",
      "        0.0161, 0.0192, 0.0048, 0.0175, 0.0442, 0.0248, 0.0060, 0.0082, 0.0158,\n",
      "        0.0206, 0.0609, 0.0147, 0.0756, 0.0088], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 43: BSNE_Loss=0.2119,local_loss: 0.21188920563658784,global_loss: 0.0808902382850647\n",
      "Q: tensor([0.0082, 0.0084, 0.0058, 0.0152, 0.0181, 0.0100, 0.0084, 0.0113, 0.0068,\n",
      "        0.0235, 0.0107, 0.0192, 0.0114, 0.0161, 0.0099, 0.0374, 0.0321, 0.0218,\n",
      "        0.0088, 0.0391, 0.0273, 0.0083, 0.0534, 0.0144, 0.0451, 0.0159, 0.0197,\n",
      "        0.0719, 0.0269, 0.0062, 0.0220, 0.0091, 0.0129, 0.0088, 0.0216, 0.0241,\n",
      "        0.0093, 0.1035, 0.0172, 0.0092, 0.0229, 0.0258, 0.0069, 0.0126, 0.0080,\n",
      "        0.0111, 0.0155, 0.0122, 0.0058, 0.0304], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 44: BSNE_Loss=0.1635,local_loss: 0.1635386185928873,global_loss: 0.5626422762870789\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Q: tensor([0.0188, 0.0084, 0.0095, 0.0210, 0.0119, 0.0375, 0.0301, 0.0123, 0.0151,\n",
      "        0.0143, 0.0567, 0.0057, 0.0134, 0.0065, 0.0173, 0.0050, 0.0372, 0.0149,\n",
      "        0.0229, 0.0313, 0.0217, 0.0227, 0.0096, 0.0575, 0.0233, 0.0230, 0.0242,\n",
      "        0.0100, 0.0176, 0.0093, 0.0039, 0.0188, 0.0196, 0.0103, 0.0073, 0.0142,\n",
      "        0.0072, 0.0092, 0.0374, 0.0360, 0.0243, 0.0177, 0.0164, 0.0065, 0.0197,\n",
      "        0.0304, 0.0246, 0.0017, 0.0735, 0.0125], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 45: BSNE_Loss=0.1492,local_loss: 0.14918400809840623,global_loss: 0.6039785146713257\n",
      "Q: tensor([0.0146, 0.0084, 0.0106, 0.0354, 0.0099, 0.0083, 0.0034, 0.0560, 0.0188,\n",
      "        0.0088, 0.1074, 0.0101, 0.0099, 0.0100, 0.0541, 0.0274, 0.0164, 0.0127,\n",
      "        0.0064, 0.0233, 0.0214, 0.0134, 0.0595, 0.0157, 0.0109, 0.0147, 0.0060,\n",
      "        0.0331, 0.0104, 0.0233, 0.0439, 0.0094, 0.0053, 0.0650, 0.0096, 0.0093,\n",
      "        0.0134, 0.0098, 0.0126, 0.0088, 0.0049, 0.0278, 0.0108, 0.0166, 0.0234,\n",
      "        0.0085, 0.0344, 0.0088, 0.0122, 0.0053], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 46: BSNE_Loss=0.2242,local_loss: 0.22420031237535376,global_loss: 0.4020850658416748\n",
      "Q: tensor([0.0292, 0.0419, 0.0239, 0.0162, 0.0126, 0.0209, 0.0255, 0.0112, 0.0140,\n",
      "        0.0053, 0.0576, 0.0131, 0.0069, 0.0134, 0.0234, 0.0031, 0.0105, 0.0308,\n",
      "        0.0084, 0.0189, 0.0396, 0.0127, 0.1054, 0.0085, 0.0178, 0.0162, 0.0042,\n",
      "        0.0107, 0.0523, 0.0041, 0.0081, 0.0076, 0.0214, 0.0296, 0.0245, 0.0072,\n",
      "        0.0285, 0.0090, 0.0131, 0.0161, 0.0157, 0.0171, 0.0392, 0.0230, 0.0154,\n",
      "        0.0160, 0.0193, 0.0126, 0.0055, 0.0126], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 47: BSNE_Loss=0.1695,local_loss: 0.16946934579570708,global_loss: 0.6463472247123718\n",
      "Q: tensor([0.0110, 0.0130, 0.0272, 0.0454, 0.0277, 0.0079, 0.0617, 0.0152, 0.0202,\n",
      "        0.0269, 0.0310, 0.0103, 0.0349, 0.0245, 0.0164, 0.0092, 0.0138, 0.0074,\n",
      "        0.0208, 0.0058, 0.0163, 0.0299, 0.0100, 0.0141, 0.0165, 0.0065, 0.0138,\n",
      "        0.0164, 0.0241, 0.0064, 0.0571, 0.0116, 0.0082, 0.0120, 0.0466, 0.0059,\n",
      "        0.0164, 0.0365, 0.0199, 0.0280, 0.0166, 0.0071, 0.0205, 0.0045, 0.0204,\n",
      "        0.0086, 0.0277, 0.0189, 0.0212, 0.0279], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 48: BSNE_Loss=0.1031,local_loss: 0.10314339399459668,global_loss: 0.5930615663528442\n",
      "Q: tensor([0.0063, 0.0288, 0.0134, 0.0051, 0.0782, 0.0152, 0.0099, 0.0093, 0.0066,\n",
      "        0.0122, 0.0175, 0.0697, 0.0115, 0.0201, 0.0042, 0.0186, 0.0183, 0.0632,\n",
      "        0.0183, 0.0144, 0.0087, 0.0237, 0.0065, 0.0116, 0.0097, 0.0254, 0.0199,\n",
      "        0.0120, 0.0120, 0.0107, 0.0313, 0.0073, 0.0216, 0.0199, 0.0253, 0.0031,\n",
      "        0.0029, 0.0219, 0.0300, 0.0061, 0.0211, 0.0130, 0.0368, 0.0080, 0.0625,\n",
      "        0.0174, 0.0347, 0.0187, 0.0284, 0.0088], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 49: BSNE_Loss=0.1915,local_loss: 0.19150830474837016,global_loss: 0.7858638167381287\n",
      "Q: tensor([0.0132, 0.0208, 0.0061, 0.0136, 0.0057, 0.0215, 0.0062, 0.0110, 0.0327,\n",
      "        0.0192, 0.0308, 0.0098, 0.0215, 0.0099, 0.0297, 0.0360, 0.0056, 0.0031,\n",
      "        0.0328, 0.0342, 0.0463, 0.0170, 0.0078, 0.0324, 0.0217, 0.0198, 0.0036,\n",
      "        0.0230, 0.0110, 0.0097, 0.0135, 0.0277, 0.0092, 0.0204, 0.0378, 0.0124,\n",
      "        0.0669, 0.0608, 0.0158, 0.0165, 0.0163, 0.0194, 0.0106, 0.0195, 0.0134,\n",
      "        0.0233, 0.0076, 0.0047, 0.0349, 0.0138], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 50: BSNE_Loss=0.1399,local_loss: 0.13988144108634823,global_loss: 0.5230960845947266\n",
      "Q: tensor([0.0184, 0.0136, 0.0092, 0.0118, 0.0115, 0.0225, 0.0079, 0.0151, 0.0275,\n",
      "        0.0072, 0.0050, 0.0093, 0.0218, 0.0171, 0.0393, 0.0111, 0.0337, 0.0228,\n",
      "        0.0158, 0.0288, 0.0045, 0.0031, 0.0096, 0.0170, 0.0095, 0.0117, 0.0145,\n",
      "        0.0126, 0.0168, 0.0278, 0.0054, 0.0142, 0.0315, 0.0312, 0.0388, 0.0161,\n",
      "        0.0230, 0.0095, 0.0130, 0.0319, 0.0188, 0.0219, 0.0351, 0.0328, 0.0059,\n",
      "        0.0279, 0.0354, 0.0720, 0.0564, 0.0029], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 51: BSNE_Loss=0.1424,local_loss: 0.1424453738530259,global_loss: 0.5645911693572998\n",
      "Q: tensor([0.0062, 0.0175, 0.0035, 0.3248, 0.0078, 0.0243, 0.0035, 0.0090, 0.0047,\n",
      "        0.0117, 0.0286, 0.0207, 0.0094, 0.0202, 0.0361, 0.0102, 0.0040, 0.0107,\n",
      "        0.0192, 0.0298, 0.0094, 0.0034, 0.0063, 0.0070, 0.0126, 0.0061, 0.0028,\n",
      "        0.0171, 0.0034, 0.0280, 0.0104, 0.0188, 0.0076, 0.0104, 0.0048, 0.0266,\n",
      "        0.0119, 0.0047, 0.0087, 0.0122, 0.0253, 0.0165, 0.0149, 0.0087, 0.0236,\n",
      "        0.0154, 0.0066, 0.0132, 0.0534, 0.0081], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 52: BSNE_Loss=0.4582,local_loss: 0.4582386112822334,global_loss: 0.16815301775932312\n",
      "Q: tensor([0.0072, 0.0344, 0.0093, 0.0112, 0.0072, 0.0260, 0.0085, 0.0177, 0.0202,\n",
      "        0.0164, 0.0062, 0.0371, 0.0040, 0.0182, 0.0301, 0.0160, 0.0167, 0.0261,\n",
      "        0.0109, 0.0137, 0.0104, 0.0068, 0.0192, 0.0183, 0.0181, 0.0864, 0.0808,\n",
      "        0.0129, 0.0097, 0.0134, 0.0183, 0.0193, 0.0109, 0.0229, 0.0091, 0.0289,\n",
      "        0.0124, 0.0085, 0.0447, 0.0149, 0.0108, 0.0331, 0.0125, 0.0204, 0.0193,\n",
      "        0.0302, 0.0037, 0.0119, 0.0380, 0.0170], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 53: BSNE_Loss=0.1375,local_loss: 0.1374973285794241,global_loss: 0.4668706953525543\n",
      "Q: tensor([0.0087, 0.0042, 0.0119, 0.0131, 0.0526, 0.0164, 0.0146, 0.0331, 0.0159,\n",
      "        0.0175, 0.0385, 0.0241, 0.0169, 0.0025, 0.0047, 0.0140, 0.0121, 0.0112,\n",
      "        0.0152, 0.0119, 0.0232, 0.0092, 0.0045, 0.0262, 0.0505, 0.0315, 0.0096,\n",
      "        0.0148, 0.0229, 0.0428, 0.0077, 0.0117, 0.0104, 0.0049, 0.0257, 0.0237,\n",
      "        0.0201, 0.0079, 0.0240, 0.0194, 0.0269, 0.0101, 0.0098, 0.0374, 0.0107,\n",
      "        0.0198, 0.0102, 0.0113, 0.0512, 0.0831], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 54: BSNE_Loss=0.1618,local_loss: 0.16182266857886238,global_loss: 0.7188190221786499\n",
      "Q: tensor([0.0047, 0.0027, 0.0265, 0.0114, 0.0141, 0.0086, 0.0128, 0.0072, 0.0073,\n",
      "        0.0144, 0.0117, 0.0086, 0.0381, 0.0084, 0.0713, 0.0054, 0.0305, 0.0055,\n",
      "        0.0131, 0.0058, 0.0143, 0.0116, 0.0229, 0.0530, 0.0074, 0.0078, 0.0315,\n",
      "        0.0069, 0.0109, 0.0097, 0.0050, 0.0130, 0.0245, 0.0316, 0.0038, 0.0680,\n",
      "        0.0133, 0.0079, 0.0208, 0.0317, 0.0221, 0.0108, 0.0161, 0.0533, 0.0104,\n",
      "        0.0403, 0.0567, 0.0199, 0.0551, 0.0118], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 55: BSNE_Loss=0.2382,local_loss: 0.23816435251943746,global_loss: 0.43786343932151794\n",
      "Q: tensor([0.0060, 0.0180, 0.0266, 0.0054, 0.0053, 0.0184, 0.0182, 0.0080, 0.0315,\n",
      "        0.0145, 0.1019, 0.0257, 0.0289, 0.0302, 0.0144, 0.0195, 0.0077, 0.0081,\n",
      "        0.0338, 0.0198, 0.0069, 0.0405, 0.0354, 0.0035, 0.0089, 0.0036, 0.0155,\n",
      "        0.0068, 0.0076, 0.0287, 0.0121, 0.0145, 0.0177, 0.0128, 0.0404, 0.0023,\n",
      "        0.0434, 0.0241, 0.0242, 0.0104, 0.0094, 0.0507, 0.0155, 0.0087, 0.0322,\n",
      "        0.0352, 0.0126, 0.0248, 0.0030, 0.0066], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 56: BSNE_Loss=0.2174,local_loss: 0.21742152931289574,global_loss: 0.3516806662082672\n",
      "Q: tensor([0.0432, 0.0314, 0.0263, 0.0068, 0.0223, 0.0040, 0.0261, 0.0091, 0.0452,\n",
      "        0.0078, 0.0158, 0.0075, 0.0120, 0.0082, 0.0407, 0.0849, 0.0061, 0.0364,\n",
      "        0.0476, 0.0179, 0.0198, 0.0049, 0.0169, 0.0212, 0.0166, 0.0299, 0.0227,\n",
      "        0.0103, 0.0148, 0.0465, 0.0086, 0.0095, 0.0049, 0.0103, 0.0055, 0.0129,\n",
      "        0.0105, 0.0324, 0.0178, 0.0340, 0.0346, 0.0157, 0.0200, 0.0181, 0.0062,\n",
      "        0.0191, 0.0093, 0.0071, 0.0032, 0.0174], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 57: BSNE_Loss=0.1798,local_loss: 0.17978823206421551,global_loss: 0.3786770701408386\n",
      "Q: tensor([0.0246, 0.0157, 0.0423, 0.0135, 0.0122, 0.0259, 0.0173, 0.0132, 0.0087,\n",
      "        0.0491, 0.0058, 0.0120, 0.0351, 0.0191, 0.0622, 0.0284, 0.0124, 0.0463,\n",
      "        0.0250, 0.0114, 0.0392, 0.0033, 0.0231, 0.0077, 0.0282, 0.0136, 0.0463,\n",
      "        0.0090, 0.0089, 0.0083, 0.0253, 0.0061, 0.0103, 0.0467, 0.0056, 0.0105,\n",
      "        0.0301, 0.0143, 0.0103, 0.0073, 0.0321, 0.0193, 0.0113, 0.0198, 0.0127,\n",
      "        0.0120, 0.0097, 0.0182, 0.0198, 0.0109], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 58: BSNE_Loss=0.1265,local_loss: 0.12650061894470066,global_loss: 0.3524957597255707\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Q: tensor([0.0373, 0.0132, 0.0054, 0.0314, 0.0041, 0.0255, 0.0151, 0.0296, 0.0107,\n",
      "        0.0077, 0.0894, 0.0047, 0.0177, 0.0054, 0.0334, 0.0040, 0.0344, 0.0038,\n",
      "        0.0130, 0.0053, 0.0161, 0.0106, 0.0223, 0.0539, 0.0303, 0.0558, 0.0190,\n",
      "        0.0361, 0.0096, 0.0029, 0.0102, 0.0129, 0.0014, 0.0168, 0.0410, 0.0131,\n",
      "        0.0056, 0.0117, 0.0133, 0.0268, 0.0100, 0.0119, 0.0166, 0.0060, 0.0276,\n",
      "        0.0258, 0.0301, 0.0148, 0.0462, 0.0105], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 59: BSNE_Loss=0.2455,local_loss: 0.24548822679897794,global_loss: 0.4903656840324402\n",
      "Q: tensor([0.0479, 0.0044, 0.0209, 0.0100, 0.0073, 0.0061, 0.0098, 0.0144, 0.0113,\n",
      "        0.0073, 0.0048, 0.0208, 0.0163, 0.0181, 0.0148, 0.0129, 0.0100, 0.0122,\n",
      "        0.0104, 0.0459, 0.0123, 0.0043, 0.0232, 0.0124, 0.0185, 0.0562, 0.0114,\n",
      "        0.0082, 0.0090, 0.0314, 0.0279, 0.0150, 0.0037, 0.0276, 0.0181, 0.0076,\n",
      "        0.0201, 0.0057, 0.0289, 0.0188, 0.0448, 0.1929, 0.0129, 0.0135, 0.0222,\n",
      "        0.0122, 0.0099, 0.0096, 0.0094, 0.0065], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 60: BSNE_Loss=0.2653,local_loss: 0.26532683661383816,global_loss: 0.24524299800395966\n",
      "Q: tensor([0.0131, 0.0187, 0.0339, 0.0057, 0.0070, 0.0300, 0.0101, 0.0105, 0.0065,\n",
      "        0.0235, 0.0493, 0.0139, 0.0439, 0.0310, 0.0114, 0.0026, 0.0064, 0.0101,\n",
      "        0.0100, 0.0153, 0.0552, 0.0057, 0.0419, 0.0164, 0.0303, 0.0283, 0.0060,\n",
      "        0.0202, 0.0227, 0.0208, 0.0322, 0.0286, 0.0110, 0.0283, 0.0104, 0.0219,\n",
      "        0.0059, 0.0217, 0.0187, 0.0048, 0.0230, 0.0114, 0.0160, 0.0193, 0.0197,\n",
      "        0.0469, 0.0329, 0.0129, 0.0171, 0.0171], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 61: BSNE_Loss=0.1244,local_loss: 0.12435391688692353,global_loss: 0.5729756355285645\n",
      "Q: tensor([0.0154, 0.0155, 0.0074, 0.1816, 0.0482, 0.0141, 0.0144, 0.0059, 0.0298,\n",
      "        0.0237, 0.0057, 0.0201, 0.0104, 0.0105, 0.0114, 0.0155, 0.0102, 0.0143,\n",
      "        0.0086, 0.0213, 0.0047, 0.0035, 0.0406, 0.0037, 0.0412, 0.0033, 0.0064,\n",
      "        0.0092, 0.0326, 0.0077, 0.0085, 0.0157, 0.0084, 0.0092, 0.0026, 0.0344,\n",
      "        0.0111, 0.0172, 0.0222, 0.0127, 0.0405, 0.0466, 0.0156, 0.0319, 0.0260,\n",
      "        0.0097, 0.0067, 0.0099, 0.0122, 0.0220], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 62: BSNE_Loss=0.2896,local_loss: 0.2895669913208412,global_loss: 0.3465491235256195\n",
      "Q: tensor([0.0228, 0.0224, 0.0054, 0.0124, 0.0824, 0.0103, 0.0138, 0.0422, 0.1730,\n",
      "        0.0134, 0.0124, 0.0069, 0.0077, 0.0114, 0.0140, 0.0039, 0.0145, 0.0155,\n",
      "        0.0064, 0.0335, 0.0296, 0.0181, 0.0135, 0.0205, 0.0023, 0.0088, 0.0177,\n",
      "        0.0199, 0.0077, 0.0064, 0.0190, 0.0059, 0.0172, 0.0076, 0.0051, 0.0291,\n",
      "        0.0147, 0.0103, 0.0039, 0.0136, 0.0077, 0.0047, 0.0193, 0.0084, 0.0264,\n",
      "        0.0682, 0.0062, 0.0220, 0.0177, 0.0241], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 63: BSNE_Loss=0.3022,local_loss: 0.30216311919438854,global_loss: 0.3538023829460144\n",
      "Q: tensor([0.0148, 0.0267, 0.0152, 0.0379, 0.0482, 0.0339, 0.0167, 0.0097, 0.0189,\n",
      "        0.0130, 0.0169, 0.0333, 0.0118, 0.0457, 0.0177, 0.0087, 0.0086, 0.0069,\n",
      "        0.0215, 0.0120, 0.0123, 0.0348, 0.0172, 0.0463, 0.0125, 0.0242, 0.0085,\n",
      "        0.0079, 0.0176, 0.0051, 0.0215, 0.0068, 0.0155, 0.0059, 0.0031, 0.0271,\n",
      "        0.0055, 0.0179, 0.0379, 0.0123, 0.0299, 0.0292, 0.0071, 0.0119, 0.0469,\n",
      "        0.0412, 0.0452, 0.0126, 0.0097, 0.0084], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 64: BSNE_Loss=0.1315,local_loss: 0.13149710463595382,global_loss: 0.3651365041732788\n",
      "Q: tensor([0.0206, 0.0134, 0.0284, 0.0066, 0.0083, 0.0051, 0.0150, 0.0322, 0.0147,\n",
      "        0.0058, 0.0208, 0.0099, 0.0142, 0.0232, 0.0082, 0.0088, 0.0070, 0.0151,\n",
      "        0.0516, 0.0228, 0.0080, 0.0163, 0.0048, 0.0125, 0.0634, 0.0185, 0.0067,\n",
      "        0.0031, 0.0561, 0.0075, 0.0182, 0.0079, 0.0035, 0.0244, 0.0248, 0.0037,\n",
      "        0.0140, 0.0210, 0.0152, 0.0071, 0.0438, 0.0162, 0.0031, 0.0200, 0.0168,\n",
      "        0.0421, 0.1100, 0.0088, 0.0565, 0.0146], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 65: BSNE_Loss=0.2592,local_loss: 0.25922259716319734,global_loss: 0.4761529564857483\n",
      "Q: tensor([0.0267, 0.0327, 0.0177, 0.0176, 0.0141, 0.0257, 0.0082, 0.0287, 0.0207,\n",
      "        0.0167, 0.0092, 0.0213, 0.0136, 0.0080, 0.0273, 0.0061, 0.0060, 0.0091,\n",
      "        0.0319, 0.0166, 0.0307, 0.0071, 0.0271, 0.0194, 0.0125, 0.0476, 0.0178,\n",
      "        0.0540, 0.0282, 0.0219, 0.0158, 0.0206, 0.0038, 0.0128, 0.0091, 0.0585,\n",
      "        0.0043, 0.0054, 0.0093, 0.0409, 0.0087, 0.0146, 0.0196, 0.0253, 0.0195,\n",
      "        0.0400, 0.0137, 0.0125, 0.0329, 0.0087], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 66: BSNE_Loss=0.1122,local_loss: 0.11217552479626106,global_loss: 0.48532021045684814\n",
      "Q: tensor([0.0109, 0.0334, 0.0158, 0.0031, 0.0221, 0.0149, 0.0070, 0.0306, 0.0066,\n",
      "        0.0097, 0.0937, 0.0082, 0.0385, 0.0619, 0.0180, 0.0122, 0.0085, 0.0080,\n",
      "        0.0096, 0.0273, 0.0398, 0.0061, 0.0128, 0.0066, 0.0460, 0.0274, 0.0204,\n",
      "        0.0173, 0.0037, 0.0228, 0.0128, 0.0136, 0.0106, 0.0068, 0.0320, 0.0149,\n",
      "        0.0143, 0.0112, 0.0084, 0.0095, 0.0144, 0.0282, 0.0493, 0.0164, 0.0216,\n",
      "        0.0144, 0.0563, 0.0061, 0.0126, 0.0035], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 67: BSNE_Loss=0.2063,local_loss: 0.20633816651110684,global_loss: 0.5524522662162781\n",
      "Q: tensor([0.0055, 0.0165, 0.0121, 0.0145, 0.0418, 0.1459, 0.0205, 0.0297, 0.0106,\n",
      "        0.0258, 0.0062, 0.0036, 0.0095, 0.0148, 0.0081, 0.0096, 0.0096, 0.0289,\n",
      "        0.0096, 0.0193, 0.0091, 0.0428, 0.0069, 0.0190, 0.0356, 0.0417, 0.0067,\n",
      "        0.0107, 0.0089, 0.0259, 0.0128, 0.0050, 0.0093, 0.0227, 0.0067, 0.0170,\n",
      "        0.0106, 0.0058, 0.0391, 0.0169, 0.0113, 0.0137, 0.0176, 0.0198, 0.0206,\n",
      "        0.0882, 0.0174, 0.0093, 0.0043, 0.0023], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 68: BSNE_Loss=0.2668,local_loss: 0.26677088039314434,global_loss: 0.3493293821811676\n",
      "Q: tensor([0.0160, 0.0189, 0.0132, 0.0134, 0.0814, 0.0135, 0.0178, 0.0261, 0.0581,\n",
      "        0.0117, 0.0082, 0.0126, 0.0103, 0.0092, 0.0125, 0.0147, 0.0199, 0.0091,\n",
      "        0.0051, 0.0522, 0.0080, 0.0353, 0.0175, 0.0154, 0.0113, 0.0273, 0.0058,\n",
      "        0.0457, 0.0108, 0.0319, 0.0128, 0.0031, 0.0096, 0.0037, 0.0153, 0.0182,\n",
      "        0.0184, 0.0263, 0.0142, 0.0137, 0.0224, 0.0149, 0.0301, 0.0103, 0.0449,\n",
      "        0.0219, 0.0192, 0.0162, 0.0318, 0.0200], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 69: BSNE_Loss=0.1286,local_loss: 0.12859736099392272,global_loss: 0.3437837064266205\n",
      "Q: tensor([0.0133, 0.0367, 0.0281, 0.0091, 0.0352, 0.0061, 0.0113, 0.0038, 0.0230,\n",
      "        0.0097, 0.0622, 0.0547, 0.0207, 0.0046, 0.0184, 0.0140, 0.0100, 0.0098,\n",
      "        0.0251, 0.0304, 0.0110, 0.0197, 0.0074, 0.0325, 0.0247, 0.0076, 0.0236,\n",
      "        0.0087, 0.0185, 0.0193, 0.0168, 0.0057, 0.0094, 0.0678, 0.0138, 0.0324,\n",
      "        0.0469, 0.0101, 0.0126, 0.0117, 0.0137, 0.0160, 0.0098, 0.0132, 0.0148,\n",
      "        0.0186, 0.0142, 0.0556, 0.0061, 0.0119], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 70: BSNE_Loss=0.1481,local_loss: 0.14813489582856196,global_loss: 0.6361371874809265\n",
      "Q: tensor([0.0181, 0.0332, 0.0132, 0.0078, 0.0247, 0.0341, 0.0147, 0.0817, 0.0076,\n",
      "        0.0095, 0.0027, 0.0042, 0.0225, 0.0252, 0.0126, 0.0197, 0.0079, 0.0090,\n",
      "        0.0344, 0.0084, 0.0226, 0.0090, 0.0232, 0.0283, 0.0146, 0.0363, 0.0297,\n",
      "        0.0312, 0.0144, 0.0209, 0.0130, 0.0194, 0.0083, 0.0207, 0.0174, 0.0350,\n",
      "        0.0248, 0.0100, 0.0123, 0.0071, 0.0294, 0.0124, 0.0072, 0.0257, 0.0376,\n",
      "        0.0064, 0.0124, 0.0530, 0.0153, 0.0116], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 71: BSNE_Loss=0.1252,local_loss: 0.1252480479960482,global_loss: 0.3385247588157654\n",
      "Q: tensor([0.0112, 0.0101, 0.0152, 0.0132, 0.1074, 0.0292, 0.0105, 0.0393, 0.0101,\n",
      "        0.0071, 0.0097, 0.0336, 0.0286, 0.0042, 0.0179, 0.0103, 0.0367, 0.0155,\n",
      "        0.0299, 0.0439, 0.0188, 0.0538, 0.0074, 0.0226, 0.0122, 0.0337, 0.0142,\n",
      "        0.0162, 0.0287, 0.0617, 0.0213, 0.0061, 0.0094, 0.0083, 0.0220, 0.0150,\n",
      "        0.0076, 0.0048, 0.0101, 0.0131, 0.0098, 0.0193, 0.0092, 0.0113, 0.0227,\n",
      "        0.0060, 0.0046, 0.0074, 0.0266, 0.0124], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 72: BSNE_Loss=0.1844,local_loss: 0.1843663456541483,global_loss: 0.46464216709136963\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Q: tensor([0.0120, 0.0102, 0.0306, 0.0084, 0.0263, 0.0065, 0.0132, 0.0346, 0.0260,\n",
      "        0.0155, 0.0129, 0.0171, 0.0101, 0.0098, 0.0137, 0.0032, 0.0103, 0.0184,\n",
      "        0.0341, 0.0459, 0.0054, 0.0235, 0.0307, 0.0252, 0.0201, 0.0221, 0.0094,\n",
      "        0.0104, 0.0468, 0.0108, 0.0130, 0.0529, 0.0057, 0.0557, 0.0134, 0.0057,\n",
      "        0.0205, 0.0206, 0.0142, 0.0201, 0.0076, 0.0316, 0.0076, 0.0592, 0.0107,\n",
      "        0.0314, 0.0056, 0.0344, 0.0128, 0.0141], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 73: BSNE_Loss=0.1391,local_loss: 0.1390945676199974,global_loss: 0.2713274657726288\n",
      "Q: tensor([0.0339, 0.0368, 0.0060, 0.0288, 0.0180, 0.0633, 0.0028, 0.0133, 0.0072,\n",
      "        0.0161, 0.0201, 0.0210, 0.0142, 0.0189, 0.0042, 0.0104, 0.0029, 0.0095,\n",
      "        0.0532, 0.0347, 0.0255, 0.0240, 0.0113, 0.0402, 0.0096, 0.0224, 0.0122,\n",
      "        0.0030, 0.1078, 0.0048, 0.0040, 0.0575, 0.0030, 0.0127, 0.0236, 0.0104,\n",
      "        0.0407, 0.0059, 0.0435, 0.0253, 0.0086, 0.0253, 0.0118, 0.0045, 0.0096,\n",
      "        0.0029, 0.0094, 0.0075, 0.0066, 0.0112], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 74: BSNE_Loss=0.3108,local_loss: 0.3108032829430377,global_loss: 0.1403121054172516\n",
      "Q: tensor([0.0157, 0.0181, 0.0135, 0.0361, 0.0227, 0.0060, 0.0270, 0.0050, 0.0199,\n",
      "        0.0643, 0.0362, 0.0176, 0.0055, 0.0045, 0.0051, 0.0083, 0.0036, 0.0137,\n",
      "        0.0296, 0.0497, 0.0083, 0.0159, 0.0078, 0.0061, 0.0173, 0.0216, 0.0157,\n",
      "        0.0261, 0.0104, 0.0306, 0.0087, 0.0207, 0.0272, 0.0705, 0.0145, 0.0139,\n",
      "        0.0158, 0.0184, 0.0038, 0.0082, 0.0132, 0.0047, 0.0476, 0.0733, 0.0309,\n",
      "        0.0331, 0.0080, 0.0076, 0.0121, 0.0059], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 75: BSNE_Loss=0.2182,local_loss: 0.2182440560160239,global_loss: 0.6896477937698364\n",
      "Q: tensor([0.0168, 0.0165, 0.0188, 0.0271, 0.0220, 0.0063, 0.0168, 0.0186, 0.0366,\n",
      "        0.0173, 0.0259, 0.0294, 0.0076, 0.0034, 0.0078, 0.0109, 0.0211, 0.0185,\n",
      "        0.0266, 0.0123, 0.0045, 0.0072, 0.0253, 0.0051, 0.0157, 0.0302, 0.0107,\n",
      "        0.1104, 0.0101, 0.0255, 0.0066, 0.0035, 0.0132, 0.0133, 0.0218, 0.0113,\n",
      "        0.0058, 0.0069, 0.0135, 0.0150, 0.1060, 0.0322, 0.0423, 0.0121, 0.0061,\n",
      "        0.0091, 0.0167, 0.0144, 0.0271, 0.0178], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 76: BSNE_Loss=0.2083,local_loss: 0.20833811534398372,global_loss: 0.5674179196357727\n",
      "Q: tensor([0.0264, 0.0063, 0.0083, 0.0159, 0.0118, 0.0094, 0.0061, 0.0164, 0.0121,\n",
      "        0.0155, 0.0131, 0.0427, 0.0674, 0.0142, 0.0074, 0.0095, 0.0173, 0.0083,\n",
      "        0.0601, 0.0153, 0.0177, 0.0186, 0.0140, 0.0426, 0.0171, 0.0633, 0.0520,\n",
      "        0.0203, 0.0054, 0.0101, 0.0213, 0.0092, 0.0099, 0.0080, 0.0280, 0.0209,\n",
      "        0.0216, 0.0106, 0.0152, 0.0084, 0.0329, 0.0031, 0.0099, 0.0153, 0.0257,\n",
      "        0.0060, 0.0373, 0.0536, 0.0120, 0.0063], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 77: BSNE_Loss=0.1736,local_loss: 0.1735724354389672,global_loss: 0.41530683636665344\n",
      "Q: tensor([0.0163, 0.0887, 0.0167, 0.0600, 0.0045, 0.0104, 0.0208, 0.0293, 0.0402,\n",
      "        0.0109, 0.0238, 0.0046, 0.0072, 0.0229, 0.0166, 0.0033, 0.0063, 0.0106,\n",
      "        0.0016, 0.0134, 0.0054, 0.0924, 0.0168, 0.0150, 0.0046, 0.0037, 0.0113,\n",
      "        0.0051, 0.0220, 0.0334, 0.0169, 0.0054, 0.0385, 0.0225, 0.0134, 0.0100,\n",
      "        0.0207, 0.0196, 0.0093, 0.0089, 0.0055, 0.0549, 0.0179, 0.0132, 0.0375,\n",
      "        0.0296, 0.0102, 0.0232, 0.0168, 0.0082], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 78: BSNE_Loss=0.2699,local_loss: 0.2698878288143758,global_loss: 0.5274341106414795\n",
      "Q: tensor([0.0084, 0.0245, 0.0301, 0.0296, 0.0073, 0.0277, 0.0139, 0.0162, 0.0242,\n",
      "        0.0371, 0.0084, 0.0064, 0.0231, 0.0107, 0.0048, 0.0607, 0.0092, 0.0065,\n",
      "        0.0123, 0.0098, 0.0048, 0.0097, 0.0136, 0.0254, 0.0142, 0.0122, 0.0132,\n",
      "        0.0200, 0.0245, 0.0218, 0.0070, 0.0134, 0.0094, 0.0103, 0.0062, 0.0088,\n",
      "        0.0076, 0.0201, 0.0359, 0.0071, 0.1178, 0.0187, 0.0098, 0.0190, 0.0073,\n",
      "        0.0538, 0.0074, 0.0879, 0.0069, 0.0154], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 79: BSNE_Loss=0.2284,local_loss: 0.2283761626004805,global_loss: 0.25091612339019775\n",
      "Q: tensor([0.0231, 0.0798, 0.0126, 0.0081, 0.0058, 0.0154, 0.0314, 0.0703, 0.0240,\n",
      "        0.0211, 0.0225, 0.0095, 0.0176, 0.0055, 0.0365, 0.0251, 0.0187, 0.0091,\n",
      "        0.0127, 0.0135, 0.0070, 0.0449, 0.0035, 0.0172, 0.0086, 0.0214, 0.0322,\n",
      "        0.0126, 0.0211, 0.0201, 0.0212, 0.0158, 0.0072, 0.0056, 0.0172, 0.0819,\n",
      "        0.0194, 0.0249, 0.0086, 0.0247, 0.0126, 0.0032, 0.0174, 0.0040, 0.0101,\n",
      "        0.0091, 0.0312, 0.0236, 0.0073, 0.0046], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 80: BSNE_Loss=0.1996,local_loss: 0.19964482507052977,global_loss: 0.40509772300720215\n",
      "Q: tensor([0.0371, 0.0099, 0.0119, 0.0150, 0.0209, 0.0691, 0.0059, 0.0111, 0.0166,\n",
      "        0.0104, 0.0096, 0.0059, 0.0094, 0.0159, 0.0466, 0.0102, 0.0038, 0.0394,\n",
      "        0.0157, 0.0122, 0.0465, 0.0241, 0.0081, 0.0219, 0.0562, 0.0368, 0.0051,\n",
      "        0.0076, 0.0097, 0.0094, 0.0519, 0.0467, 0.0202, 0.0073, 0.0210, 0.0207,\n",
      "        0.0188, 0.0061, 0.0193, 0.0205, 0.0247, 0.0224, 0.0079, 0.0161, 0.0050,\n",
      "        0.0313, 0.0185, 0.0137, 0.0221, 0.0037], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 81: BSNE_Loss=0.1720,local_loss: 0.17196976962441624,global_loss: 0.41266393661499023\n",
      "Q: tensor([0.0216, 0.0264, 0.0125, 0.0038, 0.0838, 0.0100, 0.0054, 0.0942, 0.0247,\n",
      "        0.0037, 0.0154, 0.0097, 0.0125, 0.0289, 0.0034, 0.0104, 0.0095, 0.0106,\n",
      "        0.0057, 0.0180, 0.0079, 0.0138, 0.0179, 0.0158, 0.0131, 0.0113, 0.0086,\n",
      "        0.0070, 0.0570, 0.0233, 0.0087, 0.0124, 0.0092, 0.0035, 0.0054, 0.0126,\n",
      "        0.0145, 0.0032, 0.0202, 0.0078, 0.0442, 0.0308, 0.0226, 0.0401, 0.0052,\n",
      "        0.0246, 0.0570, 0.0333, 0.0518, 0.0071], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 82: BSNE_Loss=0.2785,local_loss: 0.27851557129114046,global_loss: 0.32000258564949036\n",
      "Q: tensor([0.0070, 0.0076, 0.0199, 0.0489, 0.0181, 0.0113, 0.0110, 0.0180, 0.0142,\n",
      "        0.0196, 0.0283, 0.0258, 0.0194, 0.0100, 0.0231, 0.0043, 0.0139, 0.0097,\n",
      "        0.0294, 0.0287, 0.0079, 0.0108, 0.0068, 0.0160, 0.0324, 0.0318, 0.0264,\n",
      "        0.0043, 0.0103, 0.0234, 0.0120, 0.0784, 0.0343, 0.0107, 0.0222, 0.0457,\n",
      "        0.0615, 0.0179, 0.0212, 0.0024, 0.0344, 0.0162, 0.0103, 0.0068, 0.0221,\n",
      "        0.0236, 0.0155, 0.0023, 0.0206, 0.0036], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 83: BSNE_Loss=0.1747,local_loss: 0.1747279018425941,global_loss: 0.38781946897506714\n",
      "Q: tensor([0.0144, 0.0060, 0.0483, 0.0049, 0.0237, 0.0067, 0.0284, 0.0133, 0.0130,\n",
      "        0.0201, 0.0212, 0.0034, 0.0160, 0.0246, 0.0253, 0.0218, 0.0099, 0.0095,\n",
      "        0.0078, 0.0184, 0.0276, 0.0085, 0.0063, 0.0109, 0.0379, 0.0047, 0.0094,\n",
      "        0.0604, 0.0103, 0.0259, 0.0135, 0.0147, 0.0951, 0.0391, 0.0143, 0.0102,\n",
      "        0.0103, 0.0350, 0.0252, 0.0239, 0.0054, 0.0060, 0.0045, 0.0168, 0.0060,\n",
      "        0.0690, 0.0252, 0.0316, 0.0034, 0.0124], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 84: BSNE_Loss=0.2233,local_loss: 0.22326156674545097,global_loss: 0.44801878929138184\n",
      "Q: tensor([0.0274, 0.0116, 0.0327, 0.0114, 0.0119, 0.0141, 0.0157, 0.0262, 0.0297,\n",
      "        0.0458, 0.0185, 0.0147, 0.0136, 0.0426, 0.0030, 0.0088, 0.0262, 0.0110,\n",
      "        0.0085, 0.0130, 0.0049, 0.0317, 0.0042, 0.0129, 0.0329, 0.0350, 0.0083,\n",
      "        0.0139, 0.0144, 0.0143, 0.0170, 0.0095, 0.0239, 0.0028, 0.0197, 0.0600,\n",
      "        0.0087, 0.0227, 0.0064, 0.0140, 0.0523, 0.0100, 0.0126, 0.0512, 0.0257,\n",
      "        0.0207, 0.0102, 0.0505, 0.0120, 0.0114], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 85: BSNE_Loss=0.1487,local_loss: 0.14867985696982883,global_loss: 0.6174982190132141\n",
      "Q: tensor([0.0435, 0.0063, 0.0066, 0.0164, 0.0345, 0.0097, 0.0084, 0.0275, 0.0280,\n",
      "        0.0160, 0.0131, 0.0325, 0.0243, 0.0170, 0.0045, 0.0063, 0.0640, 0.0130,\n",
      "        0.0140, 0.0150, 0.0096, 0.0350, 0.0402, 0.0593, 0.0203, 0.0055, 0.0117,\n",
      "        0.0049, 0.0145, 0.0119, 0.0317, 0.0226, 0.0130, 0.0244, 0.0146, 0.0172,\n",
      "        0.0161, 0.0181, 0.0152, 0.0240, 0.0317, 0.0175, 0.0101, 0.0146, 0.0132,\n",
      "        0.0250, 0.0102, 0.0370, 0.0213, 0.0088], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 86: BSNE_Loss=0.1026,local_loss: 0.10263176003152367,global_loss: 0.6107935905456543\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Q: tensor([0.0307, 0.0161, 0.0058, 0.0113, 0.0081, 0.0145, 0.0444, 0.0308, 0.0146,\n",
      "        0.0112, 0.0377, 0.0520, 0.0407, 0.0257, 0.0016, 0.0157, 0.0090, 0.0148,\n",
      "        0.0178, 0.0185, 0.0081, 0.0094, 0.0198, 0.0060, 0.0137, 0.0253, 0.0236,\n",
      "        0.0884, 0.0289, 0.0234, 0.0249, 0.0131, 0.0040, 0.0169, 0.0111, 0.0159,\n",
      "        0.0245, 0.0230, 0.0045, 0.0065, 0.0152, 0.0372, 0.0068, 0.0077, 0.0305,\n",
      "        0.0403, 0.0147, 0.0077, 0.0078, 0.0201], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 87: BSNE_Loss=0.1685,local_loss: 0.1685178705358053,global_loss: 0.7549551129341125\n",
      "Q: tensor([0.0153, 0.0135, 0.0201, 0.0368, 0.0394, 0.0285, 0.0098, 0.0217, 0.0330,\n",
      "        0.0062, 0.0191, 0.0174, 0.0285, 0.0164, 0.0086, 0.0119, 0.0197, 0.0056,\n",
      "        0.0194, 0.0537, 0.0097, 0.0859, 0.0242, 0.0115, 0.0162, 0.0099, 0.0049,\n",
      "        0.0174, 0.0054, 0.0551, 0.0104, 0.0068, 0.0162, 0.0186, 0.0077, 0.0228,\n",
      "        0.0053, 0.0041, 0.0174, 0.0050, 0.0288, 0.0331, 0.0098, 0.0110, 0.0126,\n",
      "        0.0177, 0.0114, 0.0317, 0.0106, 0.0541], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 88: BSNE_Loss=0.1641,local_loss: 0.16407643638764868,global_loss: 0.39519423246383667\n",
      "Q: tensor([0.0308, 0.0079, 0.0108, 0.0154, 0.0258, 0.0544, 0.0184, 0.0490, 0.0191,\n",
      "        0.0220, 0.0103, 0.0118, 0.0119, 0.0264, 0.0177, 0.0027, 0.0030, 0.0206,\n",
      "        0.0521, 0.0165, 0.0163, 0.0073, 0.0159, 0.0261, 0.0240, 0.0102, 0.0124,\n",
      "        0.0410, 0.0046, 0.0091, 0.0032, 0.0071, 0.0202, 0.0758, 0.0266, 0.0074,\n",
      "        0.0350, 0.0374, 0.0095, 0.0196, 0.0199, 0.0269, 0.0162, 0.0195, 0.0123,\n",
      "        0.0140, 0.0065, 0.0158, 0.0227, 0.0107], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 89: BSNE_Loss=0.1555,local_loss: 0.1554626873274295,global_loss: 0.6990951895713806\n",
      "Q: tensor([0.0204, 0.0069, 0.0344, 0.0112, 0.0238, 0.0068, 0.0079, 0.0247, 0.0205,\n",
      "        0.0126, 0.0210, 0.0148, 0.0121, 0.0067, 0.0088, 0.0589, 0.0417, 0.0016,\n",
      "        0.0096, 0.0692, 0.0022, 0.0089, 0.0316, 0.0044, 0.0088, 0.0095, 0.0472,\n",
      "        0.0311, 0.0089, 0.0246, 0.0139, 0.0064, 0.0425, 0.0114, 0.0335, 0.0108,\n",
      "        0.0194, 0.0136, 0.0297, 0.0054, 0.0363, 0.0261, 0.0188, 0.0611, 0.0112,\n",
      "        0.0065, 0.0320, 0.0048, 0.0144, 0.0113], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 90: BSNE_Loss=0.2210,local_loss: 0.22103428616221257,global_loss: 0.10093080997467041\n",
      "Q: tensor([0.0177, 0.0028, 0.0198, 0.0144, 0.0224, 0.0048, 0.0116, 0.0302, 0.0562,\n",
      "        0.0123, 0.0231, 0.0155, 0.0112, 0.0052, 0.0440, 0.0290, 0.0212, 0.0038,\n",
      "        0.0251, 0.0091, 0.0126, 0.0275, 0.0272, 0.0186, 0.0177, 0.0484, 0.0331,\n",
      "        0.0162, 0.0299, 0.0381, 0.0052, 0.0060, 0.0217, 0.0063, 0.0084, 0.0145,\n",
      "        0.0021, 0.0088, 0.0275, 0.0066, 0.0315, 0.1061, 0.0136, 0.0211, 0.0067,\n",
      "        0.0147, 0.0156, 0.0100, 0.0146, 0.0103], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 91: BSNE_Loss=0.2028,local_loss: 0.20282067679004592,global_loss: 0.5542500615119934\n",
      "Q: tensor([0.0076, 0.0340, 0.0113, 0.0244, 0.0196, 0.0187, 0.0071, 0.0066, 0.0096,\n",
      "        0.0152, 0.0395, 0.0104, 0.0101, 0.0166, 0.0071, 0.0088, 0.0351, 0.0092,\n",
      "        0.0171, 0.0414, 0.0264, 0.0418, 0.0347, 0.0088, 0.0134, 0.0094, 0.0232,\n",
      "        0.0229, 0.0080, 0.0216, 0.0211, 0.0162, 0.0112, 0.0149, 0.0095, 0.0249,\n",
      "        0.0474, 0.0108, 0.0303, 0.0116, 0.0212, 0.0726, 0.0148, 0.0203, 0.0475,\n",
      "        0.0150, 0.0060, 0.0116, 0.0189, 0.0146], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 92: BSNE_Loss=0.1004,local_loss: 0.10038958332078289,global_loss: 0.4810013771057129\n",
      "Q: tensor([0.0208, 0.0133, 0.0074, 0.0641, 0.0093, 0.0319, 0.0099, 0.0117, 0.0364,\n",
      "        0.0143, 0.0109, 0.0211, 0.0042, 0.0130, 0.0022, 0.0340, 0.0126, 0.0286,\n",
      "        0.0128, 0.0070, 0.0092, 0.0225, 0.0242, 0.0057, 0.0225, 0.0088, 0.0073,\n",
      "        0.0174, 0.0090, 0.0056, 0.0367, 0.0056, 0.0057, 0.0158, 0.0175, 0.0255,\n",
      "        0.0121, 0.0160, 0.0351, 0.0317, 0.0124, 0.0178, 0.0642, 0.0085, 0.0750,\n",
      "        0.0381, 0.0117, 0.0171, 0.0446, 0.0113], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 93: BSNE_Loss=0.1844,local_loss: 0.18435702607839424,global_loss: 0.4579464793205261\n",
      "Q: tensor([0.0123, 0.0060, 0.0087, 0.0225, 0.0221, 0.0053, 0.0201, 0.0207, 0.0052,\n",
      "        0.0174, 0.0283, 0.0120, 0.0092, 0.0062, 0.0151, 0.0140, 0.0184, 0.0131,\n",
      "        0.0121, 0.0345, 0.0343, 0.0183, 0.0133, 0.0254, 0.0398, 0.0086, 0.0448,\n",
      "        0.0091, 0.0058, 0.0164, 0.0084, 0.0121, 0.0081, 0.0272, 0.0178, 0.0117,\n",
      "        0.0368, 0.0332, 0.0150, 0.0131, 0.0303, 0.0343, 0.0153, 0.0205, 0.0728,\n",
      "        0.0274, 0.0147, 0.0610, 0.0122, 0.0092], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 94: BSNE_Loss=0.1139,local_loss: 0.11392845976830923,global_loss: 0.6158850789070129\n",
      "Q: tensor([0.0074, 0.0201, 0.0654, 0.0181, 0.0197, 0.0068, 0.0073, 0.0221, 0.0073,\n",
      "        0.0057, 0.1134, 0.0157, 0.0265, 0.0088, 0.0134, 0.0177, 0.0104, 0.0180,\n",
      "        0.0112, 0.0129, 0.0031, 0.0060, 0.0128, 0.0219, 0.0565, 0.0099, 0.0198,\n",
      "        0.0332, 0.0051, 0.0320, 0.0240, 0.0041, 0.0175, 0.0298, 0.0132, 0.0455,\n",
      "        0.0073, 0.0127, 0.0084, 0.0136, 0.0096, 0.0215, 0.0187, 0.0065, 0.0201,\n",
      "        0.0594, 0.0065, 0.0275, 0.0142, 0.0118], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 95: BSNE_Loss=0.2127,local_loss: 0.21266740149711078,global_loss: 0.23677963018417358\n",
      "Q: tensor([0.0254, 0.0234, 0.0727, 0.0257, 0.0141, 0.0228, 0.0113, 0.0241, 0.0204,\n",
      "        0.0090, 0.0109, 0.0289, 0.0119, 0.0261, 0.0157, 0.0716, 0.0054, 0.0136,\n",
      "        0.0111, 0.0191, 0.0092, 0.0200, 0.0069, 0.0186, 0.0147, 0.0448, 0.0314,\n",
      "        0.0117, 0.0170, 0.0077, 0.0189, 0.0053, 0.0149, 0.0386, 0.0129, 0.0177,\n",
      "        0.0272, 0.0131, 0.0090, 0.0309, 0.0055, 0.0139, 0.0048, 0.0170, 0.0456,\n",
      "        0.0054, 0.0125, 0.0121, 0.0379, 0.0120], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 96: BSNE_Loss=0.1251,local_loss: 0.1250851129551215,global_loss: 0.5273351669311523\n",
      "Q: tensor([0.0304, 0.0157, 0.0132, 0.0315, 0.0149, 0.0074, 0.0090, 0.0226, 0.0148,\n",
      "        0.0204, 0.0108, 0.0253, 0.0069, 0.0078, 0.0116, 0.0093, 0.1157, 0.0080,\n",
      "        0.0112, 0.0133, 0.0074, 0.0127, 0.0272, 0.0370, 0.0239, 0.0143, 0.0081,\n",
      "        0.0171, 0.0119, 0.0166, 0.0253, 0.0241, 0.0223, 0.0159, 0.0194, 0.0174,\n",
      "        0.0134, 0.0039, 0.0310, 0.0079, 0.0265, 0.0315, 0.0177, 0.0132, 0.0252,\n",
      "        0.0157, 0.0833, 0.0030, 0.0198, 0.0079], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 97: BSNE_Loss=0.1544,local_loss: 0.15435860633270732,global_loss: 0.348001092672348\n",
      "Q: tensor([0.0174, 0.0171, 0.0185, 0.0197, 0.0069, 0.0231, 0.0103, 0.1071, 0.0084,\n",
      "        0.0220, 0.0091, 0.0400, 0.0170, 0.0022, 0.0070, 0.0067, 0.0051, 0.0169,\n",
      "        0.0194, 0.0093, 0.0216, 0.0144, 0.0340, 0.0298, 0.0246, 0.0408, 0.0143,\n",
      "        0.0203, 0.0159, 0.0307, 0.0104, 0.0139, 0.0185, 0.0070, 0.0234, 0.0058,\n",
      "        0.0094, 0.0071, 0.0173, 0.0061, 0.0116, 0.0114, 0.0303, 0.0105, 0.0067,\n",
      "        0.0576, 0.0707, 0.0143, 0.0175, 0.0207], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 98: BSNE_Loss=0.1832,local_loss: 0.18322902671700297,global_loss: 0.4540862739086151\n",
      "Q: tensor([0.0222, 0.0329, 0.0109, 0.0962, 0.0435, 0.0140, 0.0091, 0.0249, 0.0127,\n",
      "        0.0056, 0.0118, 0.0065, 0.0092, 0.0089, 0.0100, 0.0183, 0.0382, 0.0066,\n",
      "        0.0083, 0.0561, 0.0101, 0.0390, 0.0077, 0.0441, 0.0163, 0.0122, 0.0052,\n",
      "        0.0155, 0.0325, 0.0079, 0.0166, 0.0324, 0.0276, 0.0063, 0.0092, 0.0093,\n",
      "        0.0301, 0.0435, 0.0163, 0.0088, 0.0040, 0.0056, 0.0162, 0.0060, 0.0056,\n",
      "        0.0319, 0.0182, 0.0255, 0.0329, 0.0175], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 99: BSNE_Loss=0.1962,local_loss: 0.1961689844646177,global_loss: 0.28242313861846924\n",
      "Q: tensor([0.0139, 0.0091, 0.0136, 0.0223, 0.0101, 0.0127, 0.0256, 0.0053, 0.0107,\n",
      "        0.0103, 0.0197, 0.0119, 0.0444, 0.0304, 0.0238, 0.0062, 0.0156, 0.0269,\n",
      "        0.0154, 0.0118, 0.0099, 0.0043, 0.0162, 0.0357, 0.0291, 0.0116, 0.0551,\n",
      "        0.0071, 0.0157, 0.0086, 0.0336, 0.0531, 0.0023, 0.0185, 0.0332, 0.0428,\n",
      "        0.0904, 0.0311, 0.0104, 0.0137, 0.0110, 0.0098, 0.0051, 0.0129, 0.0197,\n",
      "        0.0087, 0.0303, 0.0191, 0.0114, 0.0099], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 100: BSNE_Loss=0.1660,local_loss: 0.16597687571838057,global_loss: 0.2426805943250656\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Q: tensor([0.0055, 0.0054, 0.0160, 0.0043, 0.0124, 0.0113, 0.0110, 0.0046, 0.0197,\n",
      "        0.0066, 0.0077, 0.0144, 0.0735, 0.0125, 0.0092, 0.0024, 0.0057, 0.0241,\n",
      "        0.0193, 0.0318, 0.0031, 0.0029, 0.0176, 0.0139, 0.0223, 0.0100, 0.0087,\n",
      "        0.0174, 0.0509, 0.0206, 0.0174, 0.0123, 0.0119, 0.0223, 0.0172, 0.0571,\n",
      "        0.0137, 0.0585, 0.0156, 0.0139, 0.0246, 0.0229, 0.0365, 0.0443, 0.0302,\n",
      "        0.0332, 0.0617, 0.0198, 0.0100, 0.0121], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 101: BSNE_Loss=0.2139,local_loss: 0.21385413587101923,global_loss: 0.5682838559150696\n",
      "Q: tensor([0.0434, 0.0055, 0.0117, 0.0234, 0.0096, 0.0220, 0.0110, 0.0121, 0.0227,\n",
      "        0.0210, 0.0238, 0.0177, 0.0120, 0.0207, 0.0075, 0.0018, 0.0223, 0.0110,\n",
      "        0.0041, 0.0070, 0.0064, 0.0213, 0.0155, 0.0102, 0.0211, 0.0203, 0.0069,\n",
      "        0.0171, 0.0211, 0.0180, 0.0104, 0.0121, 0.0341, 0.0215, 0.0291, 0.0073,\n",
      "        0.0066, 0.0165, 0.0285, 0.0117, 0.0617, 0.0109, 0.1057, 0.0140, 0.0183,\n",
      "        0.0858, 0.0125, 0.0248, 0.0100, 0.0100], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 102: BSNE_Loss=0.1884,local_loss: 0.18836382261176624,global_loss: 0.6307628154754639\n",
      "Q: tensor([0.0162, 0.0030, 0.0040, 0.0050, 0.0381, 0.0155, 0.0111, 0.0694, 0.0201,\n",
      "        0.0708, 0.0288, 0.0277, 0.0066, 0.0088, 0.0091, 0.0152, 0.0102, 0.0144,\n",
      "        0.0136, 0.0140, 0.0051, 0.0188, 0.0353, 0.0448, 0.0176, 0.0250, 0.0246,\n",
      "        0.0077, 0.0064, 0.0199, 0.0250, 0.0026, 0.0151, 0.0086, 0.0142, 0.0064,\n",
      "        0.0064, 0.0138, 0.0263, 0.0105, 0.0103, 0.0482, 0.0074, 0.0567, 0.0375,\n",
      "        0.0096, 0.0091, 0.0594, 0.0220, 0.0043], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 103: BSNE_Loss=0.2345,local_loss: 0.234484092716973,global_loss: 0.5126201510429382\n",
      "Q: tensor([0.0048, 0.0067, 0.0373, 0.0242, 0.0150, 0.0129, 0.0106, 0.0143, 0.0933,\n",
      "        0.0085, 0.0089, 0.0203, 0.0091, 0.0188, 0.0119, 0.0260, 0.0084, 0.0133,\n",
      "        0.0068, 0.0074, 0.0201, 0.0067, 0.0153, 0.0684, 0.0451, 0.0075, 0.0575,\n",
      "        0.0053, 0.0309, 0.0171, 0.0378, 0.0069, 0.0101, 0.0066, 0.0135, 0.0195,\n",
      "        0.0084, 0.0283, 0.0073, 0.0179, 0.0164, 0.0522, 0.0177, 0.0054, 0.0419,\n",
      "        0.0199, 0.0091, 0.0175, 0.0097, 0.0216], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 104: BSNE_Loss=0.1960,local_loss: 0.1960274428500607,global_loss: 0.6359232664108276\n",
      "Q: tensor([0.0344, 0.0211, 0.0181, 0.0129, 0.0102, 0.0080, 0.0184, 0.0119, 0.0098,\n",
      "        0.0026, 0.0125, 0.0101, 0.0154, 0.0135, 0.0629, 0.0196, 0.0058, 0.0088,\n",
      "        0.0151, 0.0418, 0.0077, 0.0440, 0.0372, 0.0447, 0.0093, 0.0069, 0.0319,\n",
      "        0.0206, 0.0188, 0.0211, 0.0150, 0.0150, 0.0555, 0.0131, 0.0202, 0.0104,\n",
      "        0.0221, 0.0309, 0.0054, 0.0090, 0.0043, 0.0121, 0.0418, 0.0347, 0.0401,\n",
      "        0.0172, 0.0225, 0.0138, 0.0113, 0.0108], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 105: BSNE_Loss=0.1375,local_loss: 0.1375411092838075,global_loss: 0.26622527837753296\n",
      "Q: tensor([0.0587, 0.0078, 0.0113, 0.0297, 0.0267, 0.0169, 0.0353, 0.0179, 0.0083,\n",
      "        0.0577, 0.0538, 0.0139, 0.0104, 0.0097, 0.0092, 0.0065, 0.0306, 0.0158,\n",
      "        0.0165, 0.0747, 0.0164, 0.0090, 0.0056, 0.0161, 0.0089, 0.0048, 0.0191,\n",
      "        0.0350, 0.0091, 0.0152, 0.0115, 0.0210, 0.0091, 0.0429, 0.0056, 0.0108,\n",
      "        0.0113, 0.0127, 0.0175, 0.0065, 0.0067, 0.0100, 0.0070, 0.0281, 0.0193,\n",
      "        0.0106, 0.0883, 0.0149, 0.0107, 0.0047], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 106: BSNE_Loss=0.2073,local_loss: 0.20729247755794325,global_loss: 0.36772122979164124\n",
      "Q: tensor([0.0045, 0.0097, 0.0363, 0.0094, 0.0409, 0.0189, 0.0317, 0.0081, 0.0047,\n",
      "        0.0115, 0.0325, 0.0195, 0.0184, 0.0088, 0.0111, 0.0219, 0.0089, 0.0119,\n",
      "        0.0157, 0.0092, 0.0298, 0.0133, 0.0205, 0.0150, 0.0149, 0.0046, 0.0458,\n",
      "        0.0294, 0.0091, 0.0116, 0.0048, 0.0180, 0.0313, 0.0255, 0.0133, 0.0463,\n",
      "        0.0277, 0.0242, 0.0116, 0.0228, 0.0065, 0.0139, 0.0409, 0.0035, 0.0069,\n",
      "        0.0230, 0.0312, 0.0773, 0.0318, 0.0120], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 107: BSNE_Loss=0.1470,local_loss: 0.14697146496286048,global_loss: 0.3852154612541199\n",
      "Q: tensor([0.0202, 0.0114, 0.0160, 0.0725, 0.0191, 0.0243, 0.0044, 0.0491, 0.0254,\n",
      "        0.0125, 0.0186, 0.0185, 0.0047, 0.0140, 0.0085, 0.0081, 0.0266, 0.0336,\n",
      "        0.0046, 0.0979, 0.0125, 0.0358, 0.0111, 0.0098, 0.0275, 0.0571, 0.0074,\n",
      "        0.0174, 0.0105, 0.0077, 0.0249, 0.0194, 0.0026, 0.0062, 0.0144, 0.0148,\n",
      "        0.0224, 0.0101, 0.0078, 0.0050, 0.0290, 0.0048, 0.0048, 0.0228, 0.0091,\n",
      "        0.0216, 0.0075, 0.0354, 0.0341, 0.0166], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 108: BSNE_Loss=0.2191,local_loss: 0.21911871806289465,global_loss: 0.15593066811561584\n",
      "Q: tensor([0.0144, 0.0148, 0.0137, 0.0151, 0.0050, 0.0236, 0.0057, 0.0309, 0.0122,\n",
      "        0.0187, 0.0303, 0.0115, 0.0096, 0.0170, 0.0376, 0.0035, 0.0228, 0.0065,\n",
      "        0.0099, 0.0090, 0.0074, 0.0295, 0.0143, 0.0150, 0.0117, 0.0165, 0.0234,\n",
      "        0.0254, 0.0034, 0.0300, 0.0224, 0.0065, 0.0169, 0.0048, 0.0071, 0.0159,\n",
      "        0.0370, 0.0197, 0.0160, 0.0140, 0.0441, 0.0100, 0.0361, 0.0159, 0.1115,\n",
      "        0.0235, 0.0082, 0.0609, 0.0316, 0.0095], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 109: BSNE_Loss=0.1753,local_loss: 0.17534999881550767,global_loss: 0.7763631343841553\n",
      "Q: tensor([0.0121, 0.0112, 0.0053, 0.0113, 0.0115, 0.0061, 0.0124, 0.0086, 0.0194,\n",
      "        0.0172, 0.0071, 0.0054, 0.0953, 0.0555, 0.0093, 0.0484, 0.0042, 0.0104,\n",
      "        0.0079, 0.0158, 0.0139, 0.0123, 0.0082, 0.0128, 0.0361, 0.0117, 0.0109,\n",
      "        0.0343, 0.0303, 0.0116, 0.0520, 0.0337, 0.0232, 0.0145, 0.0410, 0.0295,\n",
      "        0.0026, 0.0147, 0.0098, 0.0185, 0.0144, 0.0128, 0.0660, 0.0082, 0.0045,\n",
      "        0.0151, 0.0045, 0.0391, 0.0286, 0.0105], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 110: BSNE_Loss=0.2276,local_loss: 0.22758558251933594,global_loss: 0.30325743556022644\n",
      "Q: tensor([0.0155, 0.0245, 0.0382, 0.0143, 0.0104, 0.0606, 0.0351, 0.0178, 0.0119,\n",
      "        0.0089, 0.0076, 0.0169, 0.0189, 0.0136, 0.0086, 0.0085, 0.0534, 0.0069,\n",
      "        0.0090, 0.0285, 0.0077, 0.0221, 0.0138, 0.0200, 0.0102, 0.0112, 0.0104,\n",
      "        0.0164, 0.0143, 0.0121, 0.0302, 0.0295, 0.0116, 0.0159, 0.0085, 0.0199,\n",
      "        0.0318, 0.0085, 0.0113, 0.0267, 0.0269, 0.0237, 0.0120, 0.0043, 0.0640,\n",
      "        0.0079, 0.0512, 0.0346, 0.0169, 0.0170], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 111: BSNE_Loss=0.1113,local_loss: 0.11128084246270153,global_loss: 0.5070065259933472\n",
      "Q: tensor([0.0153, 0.0084, 0.0027, 0.0556, 0.0354, 0.0079, 0.0189, 0.0091, 0.0205,\n",
      "        0.0148, 0.0093, 0.0359, 0.0043, 0.0699, 0.0422, 0.0081, 0.0204, 0.0104,\n",
      "        0.0376, 0.0091, 0.0270, 0.0057, 0.0137, 0.0210, 0.0166, 0.0045, 0.0770,\n",
      "        0.0080, 0.0142, 0.0234, 0.0150, 0.0162, 0.0114, 0.0166, 0.0201, 0.0278,\n",
      "        0.0097, 0.0069, 0.0100, 0.0187, 0.0226, 0.0073, 0.0217, 0.0378, 0.0195,\n",
      "        0.0040, 0.0041, 0.0455, 0.0228, 0.0154], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 112: BSNE_Loss=0.1927,local_loss: 0.19270003681222406,global_loss: 0.3210936486721039\n",
      "Q: tensor([0.0459, 0.0194, 0.0289, 0.0059, 0.0448, 0.0636, 0.0110, 0.0161, 0.0168,\n",
      "        0.0058, 0.0172, 0.0179, 0.0215, 0.0155, 0.0048, 0.0067, 0.0045, 0.0195,\n",
      "        0.0045, 0.0247, 0.0135, 0.0120, 0.0166, 0.0146, 0.0047, 0.0273, 0.0127,\n",
      "        0.0435, 0.0145, 0.0143, 0.0073, 0.0080, 0.0189, 0.0143, 0.0168, 0.0163,\n",
      "        0.0118, 0.0145, 0.0471, 0.0402, 0.0686, 0.0109, 0.0587, 0.0036, 0.0104,\n",
      "        0.0217, 0.0288, 0.0169, 0.0092, 0.0070], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 113: BSNE_Loss=0.1773,local_loss: 0.17730272352682178,global_loss: 0.6521317362785339\n",
      "Q: tensor([0.0174, 0.0138, 0.0350, 0.0188, 0.0110, 0.0316, 0.0088, 0.0180, 0.0184,\n",
      "        0.0077, 0.0167, 0.0084, 0.0273, 0.0206, 0.0078, 0.0034, 0.0060, 0.0058,\n",
      "        0.0701, 0.0062, 0.0080, 0.0084, 0.0094, 0.0152, 0.0368, 0.0034, 0.0197,\n",
      "        0.0093, 0.0521, 0.0644, 0.0050, 0.0132, 0.0200, 0.0385, 0.0120, 0.0079,\n",
      "        0.0116, 0.0235, 0.0133, 0.0123, 0.0162, 0.0272, 0.0071, 0.0120, 0.0432,\n",
      "        0.0521, 0.0243, 0.0464, 0.0270, 0.0080], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 114: BSNE_Loss=0.1890,local_loss: 0.18901270992052183,global_loss: 0.25681591033935547\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Q: tensor([0.0383, 0.0101, 0.0111, 0.0226, 0.0088, 0.0460, 0.0125, 0.0089, 0.0251,\n",
      "        0.0049, 0.0496, 0.0406, 0.0133, 0.0064, 0.0121, 0.0030, 0.0180, 0.0037,\n",
      "        0.0094, 0.0165, 0.0078, 0.0169, 0.0106, 0.0232, 0.0195, 0.0082, 0.0213,\n",
      "        0.0132, 0.0089, 0.0046, 0.0195, 0.0103, 0.0096, 0.0160, 0.0068, 0.0577,\n",
      "        0.0128, 0.0182, 0.0746, 0.0176, 0.0038, 0.0102, 0.0253, 0.0544, 0.0031,\n",
      "        0.0573, 0.0089, 0.0226, 0.0641, 0.0119], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 115: BSNE_Loss=0.2367,local_loss: 0.2366984254065014,global_loss: 0.34733259677886963\n",
      "Q: tensor([0.0078, 0.0066, 0.0261, 0.0254, 0.0284, 0.0338, 0.0278, 0.0385, 0.0145,\n",
      "        0.0150, 0.0086, 0.0104, 0.0060, 0.0029, 0.0495, 0.0122, 0.0293, 0.0134,\n",
      "        0.0527, 0.0133, 0.0158, 0.0145, 0.0227, 0.0244, 0.0149, 0.0201, 0.0129,\n",
      "        0.0122, 0.0101, 0.0307, 0.0487, 0.0099, 0.0236, 0.0142, 0.0168, 0.0130,\n",
      "        0.0438, 0.0121, 0.0049, 0.0138, 0.0106, 0.0160, 0.0086, 0.0296, 0.0169,\n",
      "        0.0134, 0.0254, 0.0336, 0.0151, 0.0291], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 116: BSNE_Loss=0.0903,local_loss: 0.09034177539459397,global_loss: 0.5367546081542969\n",
      "Q: tensor([0.0106, 0.0444, 0.0211, 0.0060, 0.0365, 0.0156, 0.0059, 0.0216, 0.0110,\n",
      "        0.0148, 0.0464, 0.0369, 0.0081, 0.0035, 0.0204, 0.0067, 0.0027, 0.0036,\n",
      "        0.0562, 0.0105, 0.0038, 0.0113, 0.0096, 0.0280, 0.0213, 0.0217, 0.0168,\n",
      "        0.0137, 0.0171, 0.0278, 0.0154, 0.0111, 0.0123, 0.0465, 0.0123, 0.0106,\n",
      "        0.0059, 0.0055, 0.0128, 0.0141, 0.0216, 0.0067, 0.0092, 0.0163, 0.0434,\n",
      "        0.0217, 0.0201, 0.0505, 0.0891, 0.0213], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 117: BSNE_Loss=0.2093,local_loss: 0.20929246678580285,global_loss: 0.3741360306739807\n",
      "Q: tensor([0.0118, 0.0291, 0.0346, 0.0076, 0.0230, 0.0425, 0.0083, 0.0293, 0.0112,\n",
      "        0.0161, 0.0354, 0.0242, 0.0170, 0.0108, 0.0235, 0.0204, 0.0191, 0.0109,\n",
      "        0.0270, 0.0222, 0.0172, 0.0158, 0.0353, 0.0138, 0.0240, 0.0247, 0.0214,\n",
      "        0.0156, 0.0177, 0.0277, 0.0089, 0.0094, 0.0060, 0.0085, 0.0465, 0.0105,\n",
      "        0.0203, 0.0518, 0.0283, 0.0109, 0.0205, 0.0079, 0.0180, 0.0018, 0.0087,\n",
      "        0.0064, 0.0154, 0.0453, 0.0207, 0.0168], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 118: BSNE_Loss=0.0849,local_loss: 0.08490520784291031,global_loss: 0.5196415185928345\n",
      "Q: tensor([0.0093, 0.0565, 0.0247, 0.0076, 0.0294, 0.0303, 0.0220, 0.0100, 0.0213,\n",
      "        0.0267, 0.0065, 0.0144, 0.0103, 0.0111, 0.0057, 0.0065, 0.0583, 0.0084,\n",
      "        0.0312, 0.0237, 0.0211, 0.0376, 0.0106, 0.0128, 0.0106, 0.0047, 0.0166,\n",
      "        0.0149, 0.0741, 0.0114, 0.0172, 0.0078, 0.0098, 0.0424, 0.0251, 0.0368,\n",
      "        0.0069, 0.0144, 0.0149, 0.0043, 0.0309, 0.0043, 0.0815, 0.0071, 0.0153,\n",
      "        0.0059, 0.0146, 0.0166, 0.0090, 0.0068], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 119: BSNE_Loss=0.2038,local_loss: 0.20378862222592758,global_loss: 0.4798664450645447\n",
      "Q: tensor([0.0140, 0.0212, 0.0283, 0.0252, 0.0448, 0.0079, 0.0222, 0.0091, 0.0097,\n",
      "        0.0215, 0.0083, 0.0228, 0.0333, 0.0031, 0.0343, 0.0130, 0.0124, 0.0414,\n",
      "        0.0123, 0.0205, 0.0471, 0.0073, 0.0149, 0.0201, 0.0258, 0.0051, 0.0149,\n",
      "        0.0037, 0.0332, 0.0208, 0.0168, 0.0202, 0.0261, 0.0261, 0.0242, 0.0137,\n",
      "        0.0085, 0.0497, 0.0056, 0.0204, 0.0134, 0.0083, 0.0289, 0.0136, 0.0300,\n",
      "        0.0130, 0.0156, 0.0306, 0.0300, 0.0071], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 120: BSNE_Loss=0.0980,local_loss: 0.09796423847578996,global_loss: 0.44204723834991455\n",
      "Q: tensor([0.0098, 0.0109, 0.0308, 0.0260, 0.0254, 0.0096, 0.0068, 0.0435, 0.0301,\n",
      "        0.0066, 0.0081, 0.0078, 0.0456, 0.0112, 0.0029, 0.0051, 0.0525, 0.0230,\n",
      "        0.0106, 0.0124, 0.0402, 0.0217, 0.0318, 0.0053, 0.0077, 0.0191, 0.0518,\n",
      "        0.0223, 0.0302, 0.0166, 0.0098, 0.0349, 0.0200, 0.0483, 0.0085, 0.0042,\n",
      "        0.0098, 0.0262, 0.0061, 0.0067, 0.0179, 0.0387, 0.0204, 0.0067, 0.0514,\n",
      "        0.0143, 0.0052, 0.0207, 0.0170, 0.0079], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 121: BSNE_Loss=0.1860,local_loss: 0.1860408589553122,global_loss: 0.3440299928188324\n",
      "Q: tensor([0.0181, 0.0385, 0.0203, 0.0278, 0.0161, 0.0135, 0.0140, 0.0078, 0.0183,\n",
      "        0.0363, 0.0191, 0.0220, 0.0119, 0.0094, 0.0158, 0.0232, 0.0057, 0.0075,\n",
      "        0.0184, 0.0246, 0.0320, 0.0340, 0.0120, 0.0114, 0.0356, 0.0125, 0.0069,\n",
      "        0.0356, 0.0124, 0.0079, 0.0197, 0.0066, 0.0041, 0.0217, 0.0197, 0.0072,\n",
      "        0.0110, 0.0098, 0.0374, 0.0098, 0.0114, 0.0304, 0.0071, 0.0146, 0.0048,\n",
      "        0.0069, 0.1459, 0.0310, 0.0167, 0.0155], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 122: BSNE_Loss=0.1715,local_loss: 0.1715478294431395,global_loss: 0.80586838722229\n",
      "Q: tensor([0.0214, 0.0272, 0.0268, 0.0194, 0.0122, 0.0037, 0.0238, 0.0181, 0.0461,\n",
      "        0.0109, 0.0151, 0.0164, 0.0360, 0.0140, 0.0267, 0.0078, 0.0087, 0.0181,\n",
      "        0.0163, 0.0206, 0.0269, 0.0141, 0.0102, 0.0198, 0.0192, 0.0106, 0.0310,\n",
      "        0.0335, 0.0273, 0.0236, 0.0107, 0.0240, 0.0133, 0.0111, 0.0137, 0.0018,\n",
      "        0.0123, 0.0070, 0.0095, 0.0130, 0.0171, 0.0201, 0.0460, 0.0625, 0.0224,\n",
      "        0.0084, 0.0617, 0.0094, 0.0131, 0.0174], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 123: BSNE_Loss=0.1005,local_loss: 0.10046140376466223,global_loss: 0.37958118319511414\n",
      "Q: tensor([0.0048, 0.0416, 0.0130, 0.0059, 0.0100, 0.0350, 0.0254, 0.0119, 0.0225,\n",
      "        0.0361, 0.0170, 0.0159, 0.0133, 0.0096, 0.0235, 0.0067, 0.0300, 0.0283,\n",
      "        0.0270, 0.0218, 0.0209, 0.0123, 0.0222, 0.0209, 0.0209, 0.0553, 0.0431,\n",
      "        0.0211, 0.0055, 0.0205, 0.0130, 0.0318, 0.0353, 0.0058, 0.0128, 0.0071,\n",
      "        0.0156, 0.0276, 0.0109, 0.0094, 0.0092, 0.0117, 0.0271, 0.0063, 0.0349,\n",
      "        0.0304, 0.0256, 0.0187, 0.0143, 0.0104], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 124: BSNE_Loss=0.0839,local_loss: 0.0838713097681851,global_loss: 0.4064352512359619\n",
      "Q: tensor([0.0278, 0.0117, 0.0151, 0.0069, 0.0161, 0.0533, 0.0396, 0.0693, 0.0127,\n",
      "        0.0112, 0.0225, 0.0333, 0.0104, 0.0105, 0.0076, 0.0172, 0.0112, 0.0219,\n",
      "        0.0086, 0.0189, 0.0183, 0.0125, 0.0190, 0.0112, 0.0077, 0.0183, 0.0209,\n",
      "        0.0110, 0.0092, 0.0494, 0.0058, 0.0091, 0.0050, 0.0079, 0.0139, 0.0531,\n",
      "        0.0239, 0.0280, 0.0171, 0.0305, 0.0183, 0.0262, 0.0063, 0.0257, 0.0054,\n",
      "        0.0301, 0.0079, 0.0290, 0.0438, 0.0098], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 125: BSNE_Loss=0.1306,local_loss: 0.13060998088046094,global_loss: 0.45152854919433594\n",
      "Q: tensor([0.0694, 0.0536, 0.0494, 0.0118, 0.0115, 0.0115, 0.0076, 0.0040, 0.0300,\n",
      "        0.0096, 0.0041, 0.0183, 0.0258, 0.0058, 0.0085, 0.0048, 0.0141, 0.0032,\n",
      "        0.0170, 0.0205, 0.0073, 0.0664, 0.0234, 0.0247, 0.0060, 0.0792, 0.0077,\n",
      "        0.0083, 0.0150, 0.0110, 0.0088, 0.0060, 0.0177, 0.0420, 0.0118, 0.0304,\n",
      "        0.0048, 0.0231, 0.0135, 0.0086, 0.0064, 0.0439, 0.0073, 0.0086, 0.0114,\n",
      "        0.0132, 0.0383, 0.0166, 0.0358, 0.0226], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 126: BSNE_Loss=0.2493,local_loss: 0.24931303918634104,global_loss: 0.10937868803739548\n",
      "Q: tensor([0.0136, 0.0120, 0.0132, 0.0176, 0.0140, 0.0126, 0.0123, 0.0156, 0.0082,\n",
      "        0.0223, 0.0301, 0.0220, 0.0173, 0.0235, 0.0404, 0.0168, 0.0059, 0.0298,\n",
      "        0.0850, 0.0053, 0.0076, 0.0514, 0.0045, 0.0264, 0.0313, 0.0048, 0.0074,\n",
      "        0.0133, 0.0144, 0.0118, 0.0073, 0.0049, 0.0167, 0.0084, 0.0334, 0.0249,\n",
      "        0.0169, 0.0091, 0.0257, 0.0055, 0.0267, 0.0303, 0.0183, 0.0168, 0.0110,\n",
      "        0.0431, 0.0265, 0.0215, 0.0313, 0.0316], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 127: BSNE_Loss=0.1304,local_loss: 0.1303649715363417,global_loss: 0.6628339290618896\n",
      "Q: tensor([0.0149, 0.0087, 0.0187, 0.0116, 0.0248, 0.0153, 0.0113, 0.0267, 0.0243,\n",
      "        0.0110, 0.0165, 0.0265, 0.0051, 0.0110, 0.0083, 0.0129, 0.0423, 0.0201,\n",
      "        0.0215, 0.0521, 0.0211, 0.0092, 0.1360, 0.0071, 0.0094, 0.0224, 0.0061,\n",
      "        0.0053, 0.0078, 0.0434, 0.0096, 0.0045, 0.0088, 0.0571, 0.0207, 0.0281,\n",
      "        0.0113, 0.0119, 0.0094, 0.0311, 0.0241, 0.0128, 0.0055, 0.0309, 0.0069,\n",
      "        0.0158, 0.0034, 0.0227, 0.0257, 0.0084], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 128: BSNE_Loss=0.2106,local_loss: 0.21063657567602015,global_loss: 0.3191108703613281\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Q: tensor([0.0182, 0.0287, 0.0306, 0.0183, 0.0244, 0.0200, 0.0109, 0.0358, 0.0311,\n",
      "        0.0153, 0.0135, 0.0089, 0.0618, 0.0092, 0.0404, 0.0091, 0.0205, 0.0224,\n",
      "        0.0082, 0.0559, 0.0087, 0.0031, 0.0113, 0.0140, 0.0102, 0.0178, 0.0096,\n",
      "        0.0136, 0.0146, 0.0164, 0.0091, 0.0094, 0.0095, 0.0075, 0.0260, 0.0128,\n",
      "        0.0070, 0.0110, 0.0174, 0.0248, 0.0233, 0.0260, 0.0290, 0.0371, 0.0355,\n",
      "        0.0196, 0.0398, 0.0299, 0.0169, 0.0062], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 129: BSNE_Loss=0.0966,local_loss: 0.09660887013722544,global_loss: 0.4794662594795227\n",
      "Q: tensor([0.0119, 0.0348, 0.0148, 0.0125, 0.0183, 0.0111, 0.0597, 0.0367, 0.0230,\n",
      "        0.0050, 0.0127, 0.0227, 0.0358, 0.0200, 0.0150, 0.0225, 0.0199, 0.0305,\n",
      "        0.0319, 0.0262, 0.0296, 0.0175, 0.0279, 0.0201, 0.0142, 0.0074, 0.0046,\n",
      "        0.0053, 0.0171, 0.0397, 0.0165, 0.0151, 0.0192, 0.0234, 0.0093, 0.0181,\n",
      "        0.0137, 0.0427, 0.0297, 0.0174, 0.0065, 0.0210, 0.0416, 0.0098, 0.0148,\n",
      "        0.0052, 0.0059, 0.0162, 0.0133, 0.0117], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 130: BSNE_Loss=0.0821,local_loss: 0.08206277394313027,global_loss: 0.7445020079612732\n",
      "Q: tensor([0.0080, 0.0102, 0.0189, 0.0087, 0.0310, 0.0082, 0.0175, 0.0225, 0.0118,\n",
      "        0.0254, 0.0068, 0.0152, 0.0123, 0.0177, 0.0120, 0.0090, 0.0120, 0.0087,\n",
      "        0.0074, 0.0084, 0.0020, 0.0342, 0.0116, 0.0848, 0.0065, 0.0069, 0.0137,\n",
      "        0.0409, 0.0487, 0.0140, 0.1094, 0.0075, 0.0114, 0.0324, 0.0280, 0.0070,\n",
      "        0.0078, 0.0177, 0.0734, 0.0085, 0.0248, 0.0080, 0.0123, 0.0035, 0.0270,\n",
      "        0.0047, 0.0302, 0.0234, 0.0253, 0.0027], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 131: BSNE_Loss=0.2722,local_loss: 0.272154801783264,global_loss: 0.55864417552948\n",
      "Q: tensor([0.0099, 0.0222, 0.0938, 0.0226, 0.0651, 0.0182, 0.0073, 0.0230, 0.0327,\n",
      "        0.0143, 0.0077, 0.0103, 0.0121, 0.0106, 0.0101, 0.0345, 0.0174, 0.0063,\n",
      "        0.0241, 0.0058, 0.0100, 0.0395, 0.0049, 0.0116, 0.0254, 0.0166, 0.0111,\n",
      "        0.0203, 0.0093, 0.0078, 0.0139, 0.0078, 0.0128, 0.0122, 0.0087, 0.0199,\n",
      "        0.0267, 0.0375, 0.0065, 0.0089, 0.0166, 0.0065, 0.0077, 0.0814, 0.0079,\n",
      "        0.0234, 0.0142, 0.0116, 0.0583, 0.0127], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 132: BSNE_Loss=0.1942,local_loss: 0.1941958593254591,global_loss: 0.3068336844444275\n",
      "Q: tensor([0.0025, 0.0083, 0.0034, 0.0209, 0.0161, 0.0617, 0.0110, 0.0449, 0.0106,\n",
      "        0.0911, 0.0148, 0.0103, 0.0319, 0.0079, 0.0146, 0.0117, 0.0115, 0.0198,\n",
      "        0.0092, 0.0239, 0.0105, 0.0053, 0.0123, 0.0080, 0.0340, 0.0168, 0.0266,\n",
      "        0.0113, 0.0062, 0.0037, 0.0109, 0.0350, 0.0112, 0.0066, 0.0206, 0.0216,\n",
      "        0.0196, 0.0076, 0.0285, 0.0278, 0.0043, 0.0130, 0.0100, 0.0101, 0.0068,\n",
      "        0.1229, 0.0266, 0.0338, 0.0109, 0.0117], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 133: BSNE_Loss=0.2583,local_loss: 0.25831176975569353,global_loss: 0.3843788206577301\n",
      "Q: tensor([0.0175, 0.0043, 0.0063, 0.0050, 0.0551, 0.0093, 0.0079, 0.0054, 0.0403,\n",
      "        0.0147, 0.0099, 0.0233, 0.0067, 0.0102, 0.0084, 0.0434, 0.0111, 0.0020,\n",
      "        0.0113, 0.0126, 0.0072, 0.0229, 0.0134, 0.0120, 0.0166, 0.0171, 0.0174,\n",
      "        0.0270, 0.0067, 0.0283, 0.0119, 0.0200, 0.0100, 0.0345, 0.0302, 0.0279,\n",
      "        0.0085, 0.0283, 0.0141, 0.0085, 0.0310, 0.0496, 0.0239, 0.0759, 0.0994,\n",
      "        0.0079, 0.0134, 0.0112, 0.0049, 0.0155], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 134: BSNE_Loss=0.2270,local_loss: 0.22696561222140382,global_loss: 0.38280701637268066\n",
      "Q: tensor([0.0039, 0.0107, 0.0408, 0.0170, 0.0034, 0.0076, 0.0034, 0.0471, 0.0209,\n",
      "        0.0101, 0.0139, 0.0381, 0.0081, 0.0130, 0.0204, 0.0490, 0.0072, 0.0256,\n",
      "        0.0065, 0.0204, 0.0059, 0.0111, 0.0171, 0.0248, 0.0169, 0.0768, 0.0038,\n",
      "        0.0065, 0.0382, 0.0311, 0.0337, 0.0232, 0.0163, 0.0270, 0.0100, 0.0063,\n",
      "        0.0158, 0.0094, 0.0226, 0.0033, 0.0127, 0.0429, 0.0133, 0.0122, 0.0110,\n",
      "        0.0177, 0.0095, 0.0840, 0.0203, 0.0095], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 135: BSNE_Loss=0.2230,local_loss: 0.22304920800030914,global_loss: 0.300842821598053\n",
      "Q: tensor([0.0085, 0.0209, 0.0108, 0.0189, 0.0222, 0.0594, 0.0048, 0.0089, 0.0287,\n",
      "        0.0122, 0.0070, 0.0050, 0.0138, 0.0192, 0.0037, 0.0189, 0.0457, 0.0188,\n",
      "        0.0493, 0.0273, 0.0051, 0.0084, 0.0198, 0.0236, 0.0125, 0.0121, 0.0346,\n",
      "        0.0129, 0.0177, 0.0092, 0.0177, 0.0067, 0.0383, 0.0092, 0.0051, 0.0288,\n",
      "        0.0164, 0.0260, 0.0043, 0.0506, 0.0093, 0.0082, 0.0085, 0.0120, 0.1059,\n",
      "        0.0302, 0.0244, 0.0121, 0.0116, 0.0148], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 136: BSNE_Loss=0.1971,local_loss: 0.19706767078810436,global_loss: 0.45711231231689453\n",
      "Q: tensor([0.0183, 0.0410, 0.0248, 0.0368, 0.0107, 0.0185, 0.0274, 0.0413, 0.0070,\n",
      "        0.0479, 0.0129, 0.0572, 0.0143, 0.0092, 0.0435, 0.0055, 0.0138, 0.0316,\n",
      "        0.0058, 0.0274, 0.0233, 0.0181, 0.0079, 0.0248, 0.0243, 0.0186, 0.0131,\n",
      "        0.0080, 0.0067, 0.0198, 0.0358, 0.0064, 0.0210, 0.0223, 0.0095, 0.0159,\n",
      "        0.0156, 0.0051, 0.0187, 0.0094, 0.0077, 0.0048, 0.0123, 0.0369, 0.0138,\n",
      "        0.0156, 0.0449, 0.0133, 0.0117, 0.0199], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 137: BSNE_Loss=0.1157,local_loss: 0.1157275741660922,global_loss: 0.5199297666549683\n",
      "Q: tensor([0.0156, 0.0428, 0.0389, 0.0093, 0.0060, 0.0169, 0.0041, 0.0160, 0.0613,\n",
      "        0.0578, 0.0072, 0.0241, 0.0341, 0.0205, 0.0240, 0.0085, 0.0048, 0.0144,\n",
      "        0.0135, 0.0072, 0.0083, 0.0697, 0.0159, 0.0305, 0.0152, 0.0145, 0.0224,\n",
      "        0.0168, 0.0047, 0.0051, 0.0136, 0.0106, 0.0192, 0.0073, 0.0073, 0.0222,\n",
      "        0.0268, 0.0744, 0.0137, 0.0055, 0.0170, 0.0223, 0.0063, 0.0236, 0.0268,\n",
      "        0.0090, 0.0130, 0.0073, 0.0322, 0.0119], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 138: BSNE_Loss=0.1893,local_loss: 0.18928120603253618,global_loss: 0.42155349254608154\n",
      "Q: tensor([0.0294, 0.0073, 0.0154, 0.0250, 0.0040, 0.0079, 0.0161, 0.0086, 0.0133,\n",
      "        0.0071, 0.0473, 0.0158, 0.0137, 0.0121, 0.0253, 0.0177, 0.0041, 0.0107,\n",
      "        0.0109, 0.0145, 0.0098, 0.0077, 0.0249, 0.0127, 0.0845, 0.0298, 0.0208,\n",
      "        0.0225, 0.0131, 0.0144, 0.0615, 0.0185, 0.0264, 0.0239, 0.0104, 0.0133,\n",
      "        0.0050, 0.0712, 0.0353, 0.0231, 0.0060, 0.0355, 0.0092, 0.0103, 0.0119,\n",
      "        0.0093, 0.0277, 0.0131, 0.0178, 0.0244], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 139: BSNE_Loss=0.1547,local_loss: 0.1546965773355305,global_loss: 0.5391713380813599\n",
      "Q: tensor([0.0114, 0.0329, 0.0123, 0.0154, 0.0217, 0.0325, 0.0580, 0.0160, 0.0164,\n",
      "        0.0042, 0.0082, 0.0094, 0.0084, 0.0063, 0.0387, 0.0084, 0.0193, 0.0088,\n",
      "        0.0071, 0.0353, 0.0166, 0.0288, 0.0150, 0.0126, 0.0287, 0.0276, 0.0126,\n",
      "        0.0270, 0.0047, 0.0119, 0.0264, 0.0212, 0.0362, 0.0393, 0.0046, 0.0045,\n",
      "        0.0099, 0.0661, 0.0253, 0.0109, 0.0235, 0.0215, 0.0275, 0.0276, 0.0068,\n",
      "        0.0152, 0.0106, 0.0470, 0.0144, 0.0052], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 140: BSNE_Loss=0.1460,local_loss: 0.1459609255031827,global_loss: 0.6143678426742554\n",
      "Q: tensor([0.0141, 0.0181, 0.0203, 0.0077, 0.0183, 0.0161, 0.0476, 0.0195, 0.0200,\n",
      "        0.0495, 0.0072, 0.0351, 0.0065, 0.0376, 0.0123, 0.0309, 0.0050, 0.0059,\n",
      "        0.0579, 0.0248, 0.0200, 0.0112, 0.0033, 0.0188, 0.0314, 0.0160, 0.0094,\n",
      "        0.0088, 0.0090, 0.0505, 0.0335, 0.0157, 0.0369, 0.0112, 0.0115, 0.0023,\n",
      "        0.0097, 0.0157, 0.0114, 0.0340, 0.0037, 0.0149, 0.0275, 0.0151, 0.0096,\n",
      "        0.0197, 0.0131, 0.0635, 0.0137, 0.0046], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 141: BSNE_Loss=0.1782,local_loss: 0.17815813494612484,global_loss: 0.4068802297115326\n",
      "Q: tensor([0.0193, 0.0157, 0.0146, 0.0149, 0.0104, 0.0149, 0.0125, 0.0029, 0.0428,\n",
      "        0.0079, 0.0086, 0.0201, 0.0099, 0.0134, 0.0294, 0.0325, 0.0273, 0.0299,\n",
      "        0.0109, 0.0018, 0.0275, 0.0074, 0.0089, 0.0427, 0.0036, 0.0200, 0.0068,\n",
      "        0.0199, 0.0127, 0.0168, 0.0506, 0.0246, 0.0249, 0.0157, 0.0282, 0.0126,\n",
      "        0.0151, 0.0462, 0.0096, 0.0110, 0.0261, 0.0454, 0.0200, 0.0293, 0.0159,\n",
      "        0.0251, 0.0112, 0.0325, 0.0463, 0.0038], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 142: BSNE_Loss=0.1389,local_loss: 0.13889975233065904,global_loss: 0.21696001291275024\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Q: tensor([0.0142, 0.0079, 0.0725, 0.0165, 0.0357, 0.0084, 0.0130, 0.0080, 0.0277,\n",
      "        0.0264, 0.0283, 0.0156, 0.0326, 0.0126, 0.0104, 0.0179, 0.0042, 0.0037,\n",
      "        0.0082, 0.0146, 0.0133, 0.0125, 0.0183, 0.0474, 0.0335, 0.0602, 0.0142,\n",
      "        0.0060, 0.0161, 0.0224, 0.0260, 0.0178, 0.0262, 0.0277, 0.0090, 0.0038,\n",
      "        0.0093, 0.0125, 0.0128, 0.0277, 0.0082, 0.0044, 0.0172, 0.0102, 0.0419,\n",
      "        0.0225, 0.0390, 0.0224, 0.0189, 0.0199], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 143: BSNE_Loss=0.1374,local_loss: 0.13741542596673906,global_loss: 0.3346310555934906\n",
      "Q: tensor([0.0213, 0.0558, 0.0185, 0.0232, 0.0287, 0.0364, 0.0083, 0.0206, 0.0795,\n",
      "        0.0197, 0.0145, 0.0228, 0.0470, 0.0083, 0.0087, 0.0080, 0.0271, 0.0091,\n",
      "        0.0138, 0.0194, 0.0138, 0.0064, 0.0709, 0.0175, 0.0094, 0.0104, 0.0146,\n",
      "        0.0087, 0.0063, 0.0307, 0.0174, 0.0048, 0.0037, 0.0070, 0.0100, 0.0100,\n",
      "        0.0206, 0.0271, 0.0140, 0.0227, 0.0152, 0.0341, 0.0111, 0.0121, 0.0103,\n",
      "        0.0266, 0.0060, 0.0082, 0.0071, 0.0523], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 144: BSNE_Loss=0.1711,local_loss: 0.17113778818483333,global_loss: 0.6802749037742615\n",
      "Q: tensor([0.0224, 0.0230, 0.0248, 0.0543, 0.0167, 0.0145, 0.0215, 0.0191, 0.0257,\n",
      "        0.0141, 0.0612, 0.0126, 0.0099, 0.0134, 0.0197, 0.0426, 0.0170, 0.0091,\n",
      "        0.0141, 0.0222, 0.0153, 0.0061, 0.0043, 0.0085, 0.0256, 0.0115, 0.0450,\n",
      "        0.0185, 0.0113, 0.0229, 0.0152, 0.0149, 0.0118, 0.0163, 0.0150, 0.0042,\n",
      "        0.0408, 0.0397, 0.0277, 0.0100, 0.0098, 0.0240, 0.0093, 0.0113, 0.0220,\n",
      "        0.0332, 0.0040, 0.0117, 0.0177, 0.0347], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 145: BSNE_Loss=0.0944,local_loss: 0.09437731988492704,global_loss: 0.5850696563720703\n",
      "Q: tensor([0.0220, 0.0240, 0.0069, 0.0056, 0.0068, 0.0087, 0.0220, 0.0081, 0.0145,\n",
      "        0.0363, 0.0145, 0.0187, 0.0516, 0.0125, 0.0106, 0.0089, 0.0094, 0.0178,\n",
      "        0.0092, 0.0533, 0.0105, 0.0224, 0.0314, 0.0168, 0.0077, 0.0188, 0.0333,\n",
      "        0.0292, 0.0275, 0.0191, 0.0175, 0.0255, 0.0262, 0.0248, 0.0114, 0.0108,\n",
      "        0.0205, 0.0170, 0.0559, 0.0194, 0.0175, 0.0131, 0.0212, 0.0056, 0.0198,\n",
      "        0.0302, 0.0274, 0.0433, 0.0092, 0.0058], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 146: BSNE_Loss=0.0883,local_loss: 0.08827886754200436,global_loss: 0.45930925011634827\n",
      "Q: tensor([0.0233, 0.0222, 0.0230, 0.0212, 0.0111, 0.0243, 0.0126, 0.0295, 0.0166,\n",
      "        0.0223, 0.0034, 0.0133, 0.0207, 0.0232, 0.0222, 0.0153, 0.0176, 0.0119,\n",
      "        0.0244, 0.0100, 0.0188, 0.0247, 0.0097, 0.0079, 0.0371, 0.0202, 0.0206,\n",
      "        0.0146, 0.0214, 0.0304, 0.0217, 0.0053, 0.0141, 0.0082, 0.0105, 0.0096,\n",
      "        0.0099, 0.0422, 0.0159, 0.0249, 0.0480, 0.0474, 0.0172, 0.0254, 0.0078,\n",
      "        0.0066, 0.0375, 0.0348, 0.0213, 0.0180], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 147: BSNE_Loss=0.0537,local_loss: 0.053673902508643956,global_loss: 0.5510004758834839\n",
      "Q: tensor([0.0496, 0.0165, 0.0104, 0.0251, 0.0198, 0.0203, 0.0100, 0.0154, 0.0050,\n",
      "        0.0234, 0.0300, 0.0037, 0.0360, 0.0329, 0.0082, 0.0137, 0.0108, 0.0219,\n",
      "        0.0111, 0.0095, 0.0029, 0.0613, 0.0085, 0.0176, 0.0046, 0.0118, 0.0142,\n",
      "        0.0064, 0.0189, 0.0039, 0.0160, 0.0799, 0.0279, 0.0050, 0.0067, 0.0404,\n",
      "        0.0085, 0.0075, 0.0066, 0.0170, 0.0283, 0.0548, 0.0223, 0.0288, 0.0054,\n",
      "        0.0587, 0.0176, 0.0109, 0.0201, 0.0146], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 148: BSNE_Loss=0.2162,local_loss: 0.21621707820298552,global_loss: 0.36911797523498535\n",
      "Q: tensor([0.0048, 0.0569, 0.0272, 0.0131, 0.0110, 0.0084, 0.0184, 0.0309, 0.0138,\n",
      "        0.0038, 0.0200, 0.0115, 0.0619, 0.0088, 0.0100, 0.0265, 0.0206, 0.0127,\n",
      "        0.0319, 0.0098, 0.0084, 0.0235, 0.0313, 0.0056, 0.0194, 0.0209, 0.0233,\n",
      "        0.0221, 0.0205, 0.0274, 0.0182, 0.0059, 0.0102, 0.0251, 0.0075, 0.0134,\n",
      "        0.0181, 0.0089, 0.0193, 0.0123, 0.0051, 0.0068, 0.0167, 0.0072, 0.0314,\n",
      "        0.0391, 0.0287, 0.0347, 0.0757, 0.0113], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 149: BSNE_Loss=0.1414,local_loss: 0.14137292580491112,global_loss: 0.1680268496274948\n",
      "Q: tensor([0.0171, 0.0583, 0.0188, 0.0130, 0.0648, 0.0386, 0.0131, 0.0115, 0.0228,\n",
      "        0.0090, 0.0107, 0.0218, 0.0036, 0.0067, 0.0124, 0.0075, 0.0038, 0.0152,\n",
      "        0.0196, 0.0435, 0.0053, 0.0196, 0.0149, 0.0335, 0.0095, 0.0124, 0.0098,\n",
      "        0.1084, 0.0170, 0.0079, 0.0239, 0.0028, 0.0244, 0.0094, 0.0136, 0.0070,\n",
      "        0.0084, 0.0490, 0.0125, 0.0246, 0.0051, 0.0127, 0.0133, 0.0156, 0.0351,\n",
      "        0.0238, 0.0330, 0.0090, 0.0042, 0.0224], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 150: BSNE_Loss=0.2214,local_loss: 0.22140214308833728,global_loss: 0.3246508836746216\n",
      "Q: tensor([0.0273, 0.0232, 0.0077, 0.0062, 0.0181, 0.0240, 0.0068, 0.0116, 0.0145,\n",
      "        0.0087, 0.0109, 0.0033, 0.0083, 0.0236, 0.0127, 0.0076, 0.0058, 0.0120,\n",
      "        0.0036, 0.0264, 0.0048, 0.0110, 0.0557, 0.0207, 0.0071, 0.0090, 0.0243,\n",
      "        0.0735, 0.0158, 0.0427, 0.0512, 0.0653, 0.0265, 0.0562, 0.0099, 0.0111,\n",
      "        0.0200, 0.0094, 0.0058, 0.0121, 0.0093, 0.0390, 0.0028, 0.0189, 0.0040,\n",
      "        0.0314, 0.0114, 0.0406, 0.0264, 0.0216], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 151: BSNE_Loss=0.2346,local_loss: 0.23456702237883825,global_loss: 0.40716415643692017\n",
      "Q: tensor([0.0128, 0.0070, 0.0106, 0.0084, 0.0175, 0.0438, 0.0135, 0.0268, 0.0129,\n",
      "        0.0053, 0.0094, 0.0056, 0.0309, 0.0028, 0.0372, 0.0131, 0.0116, 0.0158,\n",
      "        0.0138, 0.0148, 0.0032, 0.0328, 0.0042, 0.0088, 0.0622, 0.0023, 0.0459,\n",
      "        0.0118, 0.0083, 0.0192, 0.0293, 0.0097, 0.0050, 0.0310, 0.0328, 0.0060,\n",
      "        0.0096, 0.0063, 0.0110, 0.0059, 0.0369, 0.0128, 0.0061, 0.0043, 0.0163,\n",
      "        0.0131, 0.1990, 0.0215, 0.0194, 0.0122], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 152: BSNE_Loss=0.3451,local_loss: 0.3450548219460643,global_loss: 0.3644363284111023\n",
      "Q: tensor([0.0049, 0.0263, 0.0389, 0.0173, 0.0235, 0.0184, 0.0122, 0.0047, 0.0088,\n",
      "        0.0237, 0.0131, 0.0261, 0.0217, 0.0153, 0.0067, 0.0247, 0.0123, 0.0517,\n",
      "        0.0069, 0.0203, 0.0468, 0.0157, 0.0072, 0.0489, 0.0037, 0.0360, 0.0150,\n",
      "        0.0155, 0.0322, 0.0063, 0.0136, 0.0159, 0.0209, 0.0123, 0.0215, 0.0065,\n",
      "        0.0159, 0.0097, 0.0136, 0.0123, 0.0206, 0.0165, 0.0139, 0.0065, 0.0076,\n",
      "        0.0112, 0.1273, 0.0215, 0.0124, 0.0154], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 153: BSNE_Loss=0.1694,local_loss: 0.16939936375862572,global_loss: 0.4021669626235962\n",
      "Q: tensor([0.0128, 0.0445, 0.0250, 0.0290, 0.0045, 0.1118, 0.0064, 0.0089, 0.0226,\n",
      "        0.0060, 0.0180, 0.0245, 0.0191, 0.0090, 0.0076, 0.0123, 0.0101, 0.0088,\n",
      "        0.0281, 0.0021, 0.0042, 0.0144, 0.0227, 0.0157, 0.0091, 0.0131, 0.0221,\n",
      "        0.0117, 0.0416, 0.0288, 0.0304, 0.0105, 0.0174, 0.0084, 0.0231, 0.0026,\n",
      "        0.0363, 0.0150, 0.0138, 0.0075, 0.0114, 0.0093, 0.0095, 0.0198, 0.0151,\n",
      "        0.0107, 0.0204, 0.0643, 0.0166, 0.0634], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 154: BSNE_Loss=0.2156,local_loss: 0.21563722100258972,global_loss: 0.24546471238136292\n",
      "Q: tensor([0.0147, 0.0211, 0.0184, 0.0432, 0.0122, 0.0073, 0.0182, 0.0756, 0.0210,\n",
      "        0.0321, 0.0233, 0.0078, 0.0250, 0.0078, 0.0165, 0.0113, 0.0147, 0.0279,\n",
      "        0.0192, 0.0184, 0.0033, 0.0960, 0.0090, 0.0319, 0.0050, 0.0102, 0.0160,\n",
      "        0.0379, 0.0092, 0.0198, 0.0196, 0.0339, 0.0108, 0.0551, 0.0066, 0.0135,\n",
      "        0.0073, 0.0047, 0.0124, 0.0046, 0.0022, 0.0125, 0.0292, 0.0608, 0.0163,\n",
      "        0.0112, 0.0028, 0.0147, 0.0056, 0.0026], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 155: BSNE_Loss=0.2608,local_loss: 0.2607531765496368,global_loss: 0.20505869388580322\n",
      "Q: tensor([0.0128, 0.0035, 0.0090, 0.0136, 0.0158, 0.0168, 0.0065, 0.0119, 0.0102,\n",
      "        0.0131, 0.0241, 0.0075, 0.0650, 0.0115, 0.0142, 0.0159, 0.0082, 0.0375,\n",
      "        0.0301, 0.0220, 0.0052, 0.0137, 0.0153, 0.0160, 0.0543, 0.0111, 0.0286,\n",
      "        0.0060, 0.0153, 0.0121, 0.0067, 0.0028, 0.0067, 0.0188, 0.0107, 0.0039,\n",
      "        0.0305, 0.0268, 0.0322, 0.0071, 0.0040, 0.1773, 0.0140, 0.0202, 0.0095,\n",
      "        0.0331, 0.0259, 0.0294, 0.0049, 0.0089], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 156: BSNE_Loss=0.2820,local_loss: 0.28197646866179304,global_loss: 0.4186834394931793\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Q: tensor([0.0162, 0.0278, 0.0096, 0.0055, 0.0174, 0.0515, 0.0036, 0.0383, 0.0312,\n",
      "        0.0218, 0.1045, 0.0231, 0.0105, 0.0133, 0.0145, 0.0095, 0.0129, 0.0129,\n",
      "        0.0077, 0.0037, 0.0045, 0.0121, 0.0288, 0.0183, 0.0071, 0.0101, 0.1019,\n",
      "        0.0072, 0.0246, 0.0117, 0.0250, 0.0153, 0.0322, 0.0051, 0.0106, 0.0398,\n",
      "        0.0083, 0.0129, 0.0175, 0.0119, 0.0204, 0.0102, 0.0260, 0.0250, 0.0112,\n",
      "        0.0040, 0.0094, 0.0380, 0.0130, 0.0023], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 157: BSNE_Loss=0.2459,local_loss: 0.2458609041020041,global_loss: 0.6672606468200684\n",
      "Q: tensor([0.0113, 0.0118, 0.0305, 0.0073, 0.0197, 0.0372, 0.0133, 0.0053, 0.0297,\n",
      "        0.0209, 0.0141, 0.0442, 0.0615, 0.0074, 0.0095, 0.0074, 0.0047, 0.0076,\n",
      "        0.0293, 0.0356, 0.0150, 0.0218, 0.0079, 0.0107, 0.0260, 0.0138, 0.0485,\n",
      "        0.0373, 0.0116, 0.0256, 0.0033, 0.0089, 0.0107, 0.0265, 0.0062, 0.0390,\n",
      "        0.0369, 0.0350, 0.0313, 0.0083, 0.0309, 0.0167, 0.0176, 0.0147, 0.0136,\n",
      "        0.0131, 0.0189, 0.0104, 0.0037, 0.0281], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 158: BSNE_Loss=0.1438,local_loss: 0.1438289459196642,global_loss: 0.5525268316268921\n",
      "Q: tensor([0.0175, 0.0068, 0.0446, 0.0486, 0.0046, 0.0211, 0.0319, 0.0296, 0.0479,\n",
      "        0.0530, 0.0740, 0.0284, 0.0040, 0.0110, 0.0136, 0.0075, 0.0040, 0.0302,\n",
      "        0.0039, 0.0060, 0.0181, 0.0079, 0.0182, 0.0052, 0.0221, 0.0102, 0.0128,\n",
      "        0.0030, 0.0153, 0.0085, 0.0306, 0.0151, 0.0181, 0.0098, 0.0092, 0.0459,\n",
      "        0.0172, 0.0051, 0.0146, 0.0178, 0.0182, 0.0054, 0.0279, 0.0208, 0.0105,\n",
      "        0.0649, 0.0190, 0.0147, 0.0187, 0.0067], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 159: BSNE_Loss=0.2194,local_loss: 0.21943246452474477,global_loss: 0.6174119114875793\n",
      "Q: tensor([0.0205, 0.0064, 0.0204, 0.0216, 0.0077, 0.0436, 0.0384, 0.0143, 0.0142,\n",
      "        0.0076, 0.0121, 0.0310, 0.0266, 0.0066, 0.0134, 0.0113, 0.0162, 0.0347,\n",
      "        0.0076, 0.0329, 0.0088, 0.0121, 0.0146, 0.0183, 0.0236, 0.0048, 0.0322,\n",
      "        0.0131, 0.0086, 0.0224, 0.0077, 0.0127, 0.0072, 0.0214, 0.0173, 0.0151,\n",
      "        0.0234, 0.0090, 0.0106, 0.0228, 0.0980, 0.0315, 0.0200, 0.0083, 0.0315,\n",
      "        0.0240, 0.0200, 0.0305, 0.0193, 0.0239], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 160: BSNE_Loss=0.1030,local_loss: 0.10300091811034118,global_loss: 0.3344249129295349\n",
      "Q: tensor([0.0448, 0.0352, 0.0354, 0.0385, 0.0364, 0.0038, 0.0122, 0.0269, 0.0153,\n",
      "        0.0188, 0.0138, 0.0106, 0.0111, 0.0375, 0.0229, 0.0099, 0.0105, 0.0189,\n",
      "        0.0183, 0.0109, 0.0109, 0.0431, 0.0193, 0.0139, 0.0107, 0.0057, 0.0384,\n",
      "        0.0219, 0.0178, 0.0098, 0.0116, 0.0413, 0.0091, 0.0089, 0.0128, 0.0054,\n",
      "        0.0150, 0.0096, 0.0251, 0.0126, 0.0198, 0.0268, 0.0228, 0.0236, 0.0099,\n",
      "        0.0365, 0.0139, 0.0453, 0.0154, 0.0117], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 161: BSNE_Loss=0.0836,local_loss: 0.08361166616301573,global_loss: 0.5823929309844971\n",
      "Q: tensor([0.0131, 0.0184, 0.0156, 0.0318, 0.0178, 0.0045, 0.0143, 0.0128, 0.0351,\n",
      "        0.0037, 0.0345, 0.0123, 0.0099, 0.0161, 0.0242, 0.0109, 0.0096, 0.0294,\n",
      "        0.0057, 0.0168, 0.0430, 0.0228, 0.0165, 0.0349, 0.0297, 0.0068, 0.0446,\n",
      "        0.0251, 0.0053, 0.0236, 0.0176, 0.0401, 0.0248, 0.0363, 0.0166, 0.0224,\n",
      "        0.0192, 0.0109, 0.0123, 0.0253, 0.0133, 0.0118, 0.0054, 0.0305, 0.0161,\n",
      "        0.0130, 0.0148, 0.0410, 0.0174, 0.0225], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 162: BSNE_Loss=0.0768,local_loss: 0.07684939459403962,global_loss: 0.3248380124568939\n",
      "Q: tensor([0.0388, 0.0263, 0.1096, 0.0093, 0.0235, 0.0066, 0.0334, 0.0261, 0.0330,\n",
      "        0.0068, 0.0121, 0.0246, 0.0085, 0.0139, 0.0345, 0.0175, 0.0047, 0.0056,\n",
      "        0.0207, 0.0343, 0.0039, 0.0255, 0.0136, 0.0353, 0.0296, 0.0060, 0.0240,\n",
      "        0.0024, 0.0210, 0.0052, 0.0145, 0.0536, 0.0091, 0.0264, 0.0217, 0.0085,\n",
      "        0.0315, 0.0084, 0.0084, 0.0097, 0.0246, 0.0132, 0.0164, 0.0105, 0.0091,\n",
      "        0.0179, 0.0155, 0.0331, 0.0068, 0.0047], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 163: BSNE_Loss=0.1997,local_loss: 0.19967205470886615,global_loss: 0.2108059823513031\n",
      "Q: tensor([0.0196, 0.0030, 0.0147, 0.0306, 0.0132, 0.1286, 0.0046, 0.0157, 0.0535,\n",
      "        0.0120, 0.0078, 0.0233, 0.0137, 0.0738, 0.0176, 0.0051, 0.0047, 0.0152,\n",
      "        0.0240, 0.0242, 0.0096, 0.0041, 0.0137, 0.0137, 0.0105, 0.0157, 0.0083,\n",
      "        0.0037, 0.0035, 0.0121, 0.0705, 0.0138, 0.0031, 0.0065, 0.0048, 0.0206,\n",
      "        0.0219, 0.0061, 0.0072, 0.0151, 0.0235, 0.0533, 0.0125, 0.0055, 0.0293,\n",
      "        0.0091, 0.0232, 0.0388, 0.0075, 0.0278], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 164: BSNE_Loss=0.3115,local_loss: 0.3115147088309542,global_loss: 0.41424909234046936\n",
      "Q: tensor([0.0098, 0.0192, 0.0085, 0.0171, 0.0233, 0.0078, 0.0100, 0.0204, 0.0205,\n",
      "        0.0173, 0.0178, 0.0111, 0.0177, 0.0150, 0.0249, 0.0077, 0.0097, 0.0954,\n",
      "        0.0248, 0.0156, 0.0230, 0.0136, 0.0219, 0.0077, 0.0103, 0.0652, 0.0122,\n",
      "        0.0051, 0.0255, 0.0713, 0.0113, 0.0096, 0.0114, 0.0253, 0.0051, 0.0189,\n",
      "        0.0128, 0.0335, 0.0406, 0.0136, 0.0131, 0.0163, 0.0094, 0.0302, 0.0050,\n",
      "        0.0174, 0.0204, 0.0220, 0.0196, 0.0152], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 165: BSNE_Loss=0.1283,local_loss: 0.12827177313497257,global_loss: 0.33175379037857056\n",
      "Q: tensor([0.1022, 0.0257, 0.0091, 0.0068, 0.0216, 0.0693, 0.0253, 0.0386, 0.0098,\n",
      "        0.0078, 0.0197, 0.0026, 0.0016, 0.0064, 0.0120, 0.0169, 0.0084, 0.0058,\n",
      "        0.0195, 0.0051, 0.0430, 0.0169, 0.0374, 0.0256, 0.0214, 0.0174, 0.0303,\n",
      "        0.0089, 0.0229, 0.0060, 0.0134, 0.0140, 0.0049, 0.0163, 0.0130, 0.0051,\n",
      "        0.0234, 0.0317, 0.0214, 0.0129, 0.0286, 0.0199, 0.0112, 0.0401, 0.0193,\n",
      "        0.0224, 0.0107, 0.0152, 0.0203, 0.0121], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 166: BSNE_Loss=0.1976,local_loss: 0.1976207821334856,global_loss: 0.6078333258628845\n",
      "Q: tensor([0.0186, 0.0140, 0.0277, 0.0030, 0.0172, 0.0153, 0.0288, 0.0260, 0.0144,\n",
      "        0.0095, 0.0152, 0.0143, 0.0146, 0.0079, 0.0389, 0.0265, 0.0092, 0.0315,\n",
      "        0.0501, 0.0051, 0.0085, 0.0072, 0.0299, 0.1089, 0.0074, 0.0060, 0.0059,\n",
      "        0.0115, 0.0462, 0.0117, 0.0246, 0.0140, 0.0072, 0.0171, 0.0276, 0.0102,\n",
      "        0.0076, 0.0151, 0.0607, 0.0164, 0.0084, 0.0161, 0.0041, 0.0105, 0.0545,\n",
      "        0.0385, 0.0126, 0.0094, 0.0043, 0.0102], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 167: BSNE_Loss=0.2166,local_loss: 0.21660313527111907,global_loss: 0.1626240462064743\n",
      "Q: tensor([0.0200, 0.0343, 0.0124, 0.0130, 0.0146, 0.0104, 0.0076, 0.0195, 0.0149,\n",
      "        0.0183, 0.0363, 0.0074, 0.0125, 0.0172, 0.0353, 0.0067, 0.0156, 0.0125,\n",
      "        0.0220, 0.0241, 0.0280, 0.0047, 0.0162, 0.0331, 0.0095, 0.0162, 0.0060,\n",
      "        0.0192, 0.0068, 0.0113, 0.0048, 0.0278, 0.0260, 0.0107, 0.0327, 0.0199,\n",
      "        0.0103, 0.0069, 0.0080, 0.0066, 0.0323, 0.0204, 0.0318, 0.0314, 0.0155,\n",
      "        0.0064, 0.1132, 0.0464, 0.0275, 0.0155], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 168: BSNE_Loss=0.1430,local_loss: 0.14299626115609296,global_loss: 0.5488343238830566\n",
      "Q: tensor([0.0326, 0.0163, 0.0231, 0.0495, 0.0113, 0.0532, 0.0168, 0.0473, 0.0103,\n",
      "        0.0101, 0.0155, 0.0174, 0.0073, 0.0205, 0.0355, 0.0068, 0.0189, 0.0137,\n",
      "        0.0115, 0.0141, 0.0202, 0.0102, 0.0092, 0.0229, 0.0096, 0.0133, 0.0135,\n",
      "        0.0129, 0.0152, 0.0137, 0.0077, 0.0368, 0.0554, 0.0285, 0.0523, 0.0418,\n",
      "        0.0207, 0.0140, 0.0176, 0.0073, 0.0110, 0.0085, 0.0105, 0.0290, 0.0091,\n",
      "        0.0139, 0.0260, 0.0120, 0.0099, 0.0159], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 169: BSNE_Loss=0.0924,local_loss: 0.09244679001684379,global_loss: 0.6421140432357788\n",
      "Q: tensor([0.0142, 0.0474, 0.0471, 0.0258, 0.0219, 0.0034, 0.0102, 0.0071, 0.0212,\n",
      "        0.0146, 0.0197, 0.0047, 0.0083, 0.0251, 0.0160, 0.0052, 0.0124, 0.0198,\n",
      "        0.0291, 0.0708, 0.0289, 0.0813, 0.0052, 0.0169, 0.0428, 0.0099, 0.0050,\n",
      "        0.0037, 0.0166, 0.0225, 0.0243, 0.0192, 0.0123, 0.0088, 0.0399, 0.0136,\n",
      "        0.0127, 0.0313, 0.0084, 0.0180, 0.0282, 0.0284, 0.0150, 0.0068, 0.0121,\n",
      "        0.0234, 0.0205, 0.0046, 0.0030, 0.0126], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 170: BSNE_Loss=0.1958,local_loss: 0.19577624209013614,global_loss: 0.4534239172935486\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Q: tensor([0.0086, 0.0157, 0.0179, 0.0229, 0.0424, 0.0078, 0.0442, 0.0135, 0.0745,\n",
      "        0.0197, 0.0292, 0.0104, 0.0181, 0.0244, 0.0257, 0.0128, 0.0180, 0.0277,\n",
      "        0.0056, 0.0431, 0.0170, 0.0297, 0.0218, 0.0107, 0.0120, 0.0086, 0.0069,\n",
      "        0.0102, 0.0069, 0.0249, 0.0053, 0.0153, 0.0076, 0.0265, 0.0055, 0.0074,\n",
      "        0.0145, 0.0199, 0.0231, 0.0220, 0.0380, 0.0644, 0.0111, 0.0061, 0.0187,\n",
      "        0.0121, 0.0163, 0.0267, 0.0057, 0.0229], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 171: BSNE_Loss=0.1285,local_loss: 0.12849103649996646,global_loss: 0.6335386037826538\n",
      "Q: tensor([0.0149, 0.0143, 0.0169, 0.0163, 0.0051, 0.0945, 0.0233, 0.0263, 0.0368,\n",
      "        0.0047, 0.0220, 0.0291, 0.0027, 0.0077, 0.0067, 0.0044, 0.0104, 0.0195,\n",
      "        0.0066, 0.0553, 0.0134, 0.0187, 0.0080, 0.0247, 0.0356, 0.0110, 0.0043,\n",
      "        0.0106, 0.0089, 0.0145, 0.0058, 0.0243, 0.0164, 0.0185, 0.0304, 0.0433,\n",
      "        0.0092, 0.0124, 0.0152, 0.0079, 0.0197, 0.0275, 0.0222, 0.0266, 0.0122,\n",
      "        0.0154, 0.0234, 0.0089, 0.0419, 0.0515], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 172: BSNE_Loss=0.1788,local_loss: 0.17875100960599474,global_loss: 0.5087225437164307\n",
      "Q: tensor([0.0151, 0.0116, 0.0083, 0.0256, 0.0058, 0.0521, 0.0159, 0.0213, 0.0109,\n",
      "        0.0578, 0.0042, 0.0533, 0.0263, 0.0464, 0.0070, 0.0095, 0.0174, 0.0094,\n",
      "        0.0213, 0.0178, 0.0104, 0.0138, 0.0071, 0.0225, 0.0427, 0.0093, 0.0219,\n",
      "        0.0088, 0.0076, 0.0133, 0.0152, 0.0133, 0.0178, 0.0271, 0.0290, 0.0058,\n",
      "        0.0161, 0.0428, 0.0121, 0.0141, 0.0099, 0.0669, 0.0048, 0.0082, 0.0060,\n",
      "        0.0053, 0.0092, 0.0189, 0.0437, 0.0392], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 173: BSNE_Loss=0.1768,local_loss: 0.17679712948939844,global_loss: 0.4842526614665985\n",
      "Q: tensor([0.0145, 0.0373, 0.0442, 0.0492, 0.0173, 0.0096, 0.0175, 0.0121, 0.0169,\n",
      "        0.0480, 0.0177, 0.0068, 0.0121, 0.0140, 0.0297, 0.0232, 0.0130, 0.0267,\n",
      "        0.0682, 0.0353, 0.0056, 0.0152, 0.0176, 0.0076, 0.0125, 0.0088, 0.0188,\n",
      "        0.0099, 0.0062, 0.0132, 0.0339, 0.0065, 0.0064, 0.0323, 0.0044, 0.0144,\n",
      "        0.0027, 0.0332, 0.0185, 0.0097, 0.0133, 0.0400, 0.0046, 0.0163, 0.0100,\n",
      "        0.0168, 0.0406, 0.0432, 0.0159, 0.0087], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 174: BSNE_Loss=0.1585,local_loss: 0.15851682899783737,global_loss: 0.41122227907180786\n",
      "Q: tensor([0.0132, 0.0095, 0.0256, 0.0293, 0.0235, 0.0187, 0.0059, 0.0156, 0.0335,\n",
      "        0.0270, 0.0068, 0.0220, 0.0083, 0.0580, 0.0178, 0.0064, 0.0899, 0.0137,\n",
      "        0.0291, 0.0199, 0.0296, 0.0584, 0.0223, 0.0087, 0.0348, 0.0208, 0.0155,\n",
      "        0.0117, 0.0042, 0.0236, 0.0055, 0.0238, 0.0117, 0.0124, 0.0059, 0.0110,\n",
      "        0.0062, 0.0298, 0.0210, 0.0227, 0.0070, 0.0058, 0.0148, 0.0157, 0.0208,\n",
      "        0.0062, 0.0051, 0.0407, 0.0075, 0.0229], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 175: BSNE_Loss=0.1646,local_loss: 0.16455469226049455,global_loss: 0.5508391857147217\n",
      "Q: tensor([0.0259, 0.0066, 0.0054, 0.0072, 0.0083, 0.0086, 0.0079, 0.0205, 0.0164,\n",
      "        0.0273, 0.0419, 0.0265, 0.0196, 0.0086, 0.0093, 0.0339, 0.0069, 0.0563,\n",
      "        0.0278, 0.0360, 0.0065, 0.0103, 0.0100, 0.0067, 0.0074, 0.0352, 0.0085,\n",
      "        0.0175, 0.0149, 0.0223, 0.0162, 0.0166, 0.0674, 0.0371, 0.0251, 0.0108,\n",
      "        0.0104, 0.0307, 0.0211, 0.0203, 0.0209, 0.0151, 0.0311, 0.0121, 0.0279,\n",
      "        0.0191, 0.0181, 0.0267, 0.0097, 0.0238], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 176: BSNE_Loss=0.1064,local_loss: 0.10639453529558805,global_loss: 0.2802931070327759\n",
      "Q: tensor([0.0409, 0.0123, 0.0095, 0.0370, 0.0170, 0.0623, 0.0227, 0.0179, 0.0088,\n",
      "        0.0092, 0.0161, 0.0228, 0.0074, 0.0062, 0.0147, 0.0083, 0.0365, 0.0110,\n",
      "        0.0255, 0.0114, 0.0106, 0.0106, 0.0418, 0.0157, 0.0074, 0.0260, 0.0048,\n",
      "        0.0144, 0.0158, 0.0092, 0.0097, 0.0391, 0.0178, 0.0053, 0.0363, 0.0540,\n",
      "        0.0063, 0.0059, 0.0379, 0.0082, 0.0103, 0.0157, 0.0120, 0.0188, 0.0188,\n",
      "        0.0308, 0.0240, 0.0534, 0.0359, 0.0056], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 177: BSNE_Loss=0.1480,local_loss: 0.14798984496283507,global_loss: 0.49550408124923706\n",
      "Q: tensor([0.0196, 0.0049, 0.0140, 0.0040, 0.0246, 0.0102, 0.0031, 0.0052, 0.0056,\n",
      "        0.0027, 0.0108, 0.0092, 0.0369, 0.0267, 0.0086, 0.0080, 0.0067, 0.0060,\n",
      "        0.0039, 0.0196, 0.0112, 0.0085, 0.0106, 0.2244, 0.0115, 0.0754, 0.0040,\n",
      "        0.0118, 0.1195, 0.0132, 0.0048, 0.0270, 0.0087, 0.0014, 0.0080, 0.0031,\n",
      "        0.0128, 0.0123, 0.0132, 0.0944, 0.0080, 0.0082, 0.0071, 0.0079, 0.0086,\n",
      "        0.0021, 0.0035, 0.0181, 0.0050, 0.0259], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 178: BSNE_Loss=0.5815,local_loss: 0.58146628507245,global_loss: -0.05449480190873146\n",
      "Q: tensor([0.0524, 0.0150, 0.0105, 0.0121, 0.0110, 0.0355, 0.0138, 0.0321, 0.0087,\n",
      "        0.0203, 0.0547, 0.0401, 0.0188, 0.0140, 0.0139, 0.0040, 0.0135, 0.0348,\n",
      "        0.0404, 0.0156, 0.0099, 0.0260, 0.0087, 0.0190, 0.0113, 0.0299, 0.0180,\n",
      "        0.0116, 0.0064, 0.0050, 0.0283, 0.0073, 0.0298, 0.0285, 0.0065, 0.0091,\n",
      "        0.0279, 0.0228, 0.0267, 0.0084, 0.0363, 0.0230, 0.0091, 0.0093, 0.0220,\n",
      "        0.0129, 0.0047, 0.0364, 0.0301, 0.0139], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 179: BSNE_Loss=0.1119,local_loss: 0.11185755434581218,global_loss: 0.49377453327178955\n",
      "Q: tensor([0.0325, 0.0244, 0.0040, 0.0152, 0.0037, 0.0250, 0.0066, 0.0154, 0.0491,\n",
      "        0.0165, 0.0124, 0.0203, 0.0115, 0.0158, 0.0140, 0.0102, 0.0152, 0.0056,\n",
      "        0.0380, 0.0055, 0.0069, 0.0096, 0.0267, 0.0143, 0.0438, 0.0289, 0.0158,\n",
      "        0.0325, 0.0124, 0.0191, 0.0092, 0.0280, 0.0145, 0.0049, 0.0221, 0.0040,\n",
      "        0.0126, 0.0494, 0.0308, 0.0122, 0.0118, 0.0305, 0.0062, 0.0073, 0.0805,\n",
      "        0.0321, 0.0492, 0.0099, 0.0166, 0.0172], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 180: BSNE_Loss=0.1670,local_loss: 0.1669927930568375,global_loss: 0.4208030700683594\n",
      "Q: tensor([0.0178, 0.0035, 0.0330, 0.0235, 0.0184, 0.0338, 0.0174, 0.0074, 0.0158,\n",
      "        0.0222, 0.0209, 0.0265, 0.0219, 0.0299, 0.0374, 0.0070, 0.0317, 0.0090,\n",
      "        0.0079, 0.0055, 0.0071, 0.0133, 0.0310, 0.0633, 0.0088, 0.0084, 0.0175,\n",
      "        0.0109, 0.0204, 0.0070, 0.0341, 0.0103, 0.0107, 0.0225, 0.0155, 0.0127,\n",
      "        0.0106, 0.0295, 0.0052, 0.0112, 0.0067, 0.0837, 0.0322, 0.0127, 0.0106,\n",
      "        0.0478, 0.0150, 0.0070, 0.0318, 0.0118], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 181: BSNE_Loss=0.1511,local_loss: 0.15109484088888495,global_loss: 0.4788961112499237\n",
      "Q: tensor([0.0186, 0.0319, 0.0100, 0.0157, 0.0065, 0.0147, 0.0406, 0.0103, 0.0332,\n",
      "        0.0022, 0.0048, 0.0295, 0.0250, 0.0373, 0.0065, 0.0108, 0.0067, 0.0347,\n",
      "        0.0047, 0.0484, 0.0099, 0.0060, 0.0183, 0.0414, 0.0062, 0.0225, 0.0069,\n",
      "        0.0435, 0.0058, 0.0062, 0.0116, 0.0089, 0.0129, 0.0139, 0.0109, 0.0093,\n",
      "        0.0132, 0.0519, 0.0469, 0.0050, 0.0131, 0.0842, 0.0099, 0.0046, 0.0617,\n",
      "        0.0242, 0.0153, 0.0108, 0.0065, 0.0263], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 182: BSNE_Loss=0.2501,local_loss: 0.25014980936038844,global_loss: 0.1837066411972046\n",
      "Q: tensor([0.0151, 0.0276, 0.0064, 0.0092, 0.0157, 0.0078, 0.0124, 0.0060, 0.0428,\n",
      "        0.0212, 0.0279, 0.0436, 0.0038, 0.0186, 0.0239, 0.0111, 0.0173, 0.0139,\n",
      "        0.0070, 0.0093, 0.0032, 0.0069, 0.0210, 0.0076, 0.0159, 0.0075, 0.0700,\n",
      "        0.0116, 0.0608, 0.0056, 0.0050, 0.0182, 0.0130, 0.0664, 0.0639, 0.0062,\n",
      "        0.0300, 0.0400, 0.0261, 0.0106, 0.0102, 0.0199, 0.0035, 0.0070, 0.0304,\n",
      "        0.0092, 0.0187, 0.0370, 0.0273, 0.0064], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 183: BSNE_Loss=0.2314,local_loss: 0.23144510518621034,global_loss: 0.2889865040779114\n",
      "Q: tensor([0.0336, 0.0063, 0.0161, 0.0504, 0.0053, 0.0821, 0.0081, 0.0156, 0.0072,\n",
      "        0.0231, 0.0080, 0.0225, 0.0223, 0.0053, 0.0150, 0.0062, 0.0089, 0.0106,\n",
      "        0.0224, 0.0187, 0.0078, 0.0299, 0.0122, 0.0140, 0.0186, 0.0156, 0.0107,\n",
      "        0.0183, 0.0057, 0.0699, 0.0053, 0.0157, 0.0376, 0.0701, 0.0079, 0.0267,\n",
      "        0.0136, 0.0325, 0.0049, 0.0048, 0.0153, 0.0147, 0.0277, 0.0085, 0.0104,\n",
      "        0.0115, 0.0775, 0.0074, 0.0071, 0.0106], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 184: BSNE_Loss=0.2323,local_loss: 0.23227206696518943,global_loss: 0.19196635484695435\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Q: tensor([0.0225, 0.0239, 0.0463, 0.0275, 0.0095, 0.0057, 0.0022, 0.0449, 0.0246,\n",
      "        0.0027, 0.0124, 0.0237, 0.0035, 0.0168, 0.0118, 0.0125, 0.0102, 0.0558,\n",
      "        0.0179, 0.0146, 0.0315, 0.0589, 0.0085, 0.0032, 0.0157, 0.0050, 0.0560,\n",
      "        0.0207, 0.0071, 0.0240, 0.0306, 0.0097, 0.0088, 0.0208, 0.0098, 0.0127,\n",
      "        0.0193, 0.0078, 0.0291, 0.0260, 0.0237, 0.0578, 0.0127, 0.0169, 0.0153,\n",
      "        0.0091, 0.0219, 0.0094, 0.0154, 0.0237], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 185: BSNE_Loss=0.1831,local_loss: 0.18306779070218426,global_loss: 0.6562660336494446\n",
      "Q: tensor([0.0039, 0.0195, 0.0070, 0.0123, 0.0279, 0.0154, 0.0112, 0.0139, 0.0137,\n",
      "        0.0194, 0.0345, 0.1108, 0.0182, 0.0197, 0.0075, 0.0138, 0.0350, 0.0079,\n",
      "        0.0393, 0.0221, 0.0202, 0.0310, 0.0230, 0.0323, 0.0102, 0.0099, 0.0058,\n",
      "        0.0043, 0.0118, 0.0423, 0.0187, 0.0198, 0.0103, 0.0068, 0.0112, 0.0209,\n",
      "        0.0047, 0.0357, 0.0151, 0.0084, 0.0070, 0.0483, 0.0085, 0.0359, 0.0130,\n",
      "        0.0253, 0.0140, 0.0131, 0.0142, 0.0252], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 186: BSNE_Loss=0.1551,local_loss: 0.1551260407539217,global_loss: 0.6785638332366943\n",
      "Q: tensor([0.0338, 0.0527, 0.0175, 0.0125, 0.0224, 0.0147, 0.0202, 0.0300, 0.0149,\n",
      "        0.0111, 0.0219, 0.0132, 0.0129, 0.0085, 0.0341, 0.0123, 0.0065, 0.0108,\n",
      "        0.0110, 0.0287, 0.0149, 0.0087, 0.0364, 0.0509, 0.0209, 0.0071, 0.0157,\n",
      "        0.0141, 0.0159, 0.0126, 0.0535, 0.0130, 0.0159, 0.0224, 0.0582, 0.0151,\n",
      "        0.0086, 0.0303, 0.0162, 0.0036, 0.0176, 0.0166, 0.0222, 0.0184, 0.0138,\n",
      "        0.0155, 0.0441, 0.0042, 0.0061, 0.0177], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 187: BSNE_Loss=0.1010,local_loss: 0.10095725282241627,global_loss: 0.3938143253326416\n",
      "Q: tensor([0.0077, 0.1621, 0.0083, 0.0079, 0.0132, 0.0426, 0.0382, 0.0068, 0.0026,\n",
      "        0.0396, 0.0124, 0.0094, 0.0222, 0.0193, 0.0108, 0.0192, 0.0285, 0.0130,\n",
      "        0.0208, 0.0169, 0.0225, 0.0205, 0.0503, 0.0133, 0.0163, 0.0089, 0.0192,\n",
      "        0.0077, 0.0112, 0.0144, 0.0243, 0.0097, 0.0231, 0.0208, 0.0104, 0.0086,\n",
      "        0.0237, 0.0090, 0.0210, 0.0151, 0.0043, 0.0128, 0.0078, 0.0244, 0.0161,\n",
      "        0.0077, 0.0170, 0.0333, 0.0152, 0.0098], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 188: BSNE_Loss=0.1879,local_loss: 0.1879293030355509,global_loss: 0.38739439845085144\n",
      "Q: tensor([0.0102, 0.0073, 0.0351, 0.0043, 0.0078, 0.0231, 0.0170, 0.0131, 0.0091,\n",
      "        0.0164, 0.0138, 0.0229, 0.0481, 0.0142, 0.0082, 0.0439, 0.1075, 0.0098,\n",
      "        0.0229, 0.0127, 0.0093, 0.0377, 0.0098, 0.0054, 0.0211, 0.0069, 0.0149,\n",
      "        0.0159, 0.0088, 0.0561, 0.0761, 0.0071, 0.0063, 0.0171, 0.0357, 0.0111,\n",
      "        0.0125, 0.0202, 0.0207, 0.0119, 0.0036, 0.0117, 0.0199, 0.0274, 0.0161,\n",
      "        0.0327, 0.0067, 0.0077, 0.0128, 0.0094], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 189: BSNE_Loss=0.2056,local_loss: 0.20564360162876022,global_loss: 0.385720819234848\n",
      "Q: tensor([0.0807, 0.0357, 0.0406, 0.0219, 0.0170, 0.0137, 0.0089, 0.0278, 0.0052,\n",
      "        0.0128, 0.0184, 0.0389, 0.0094, 0.0046, 0.0144, 0.0073, 0.0221, 0.0060,\n",
      "        0.0284, 0.0124, 0.0097, 0.0168, 0.0205, 0.0163, 0.0253, 0.0294, 0.0051,\n",
      "        0.0226, 0.0176, 0.0280, 0.0082, 0.0164, 0.0071, 0.0192, 0.0472, 0.0082,\n",
      "        0.0439, 0.0130, 0.0092, 0.0135, 0.0151, 0.0252, 0.0112, 0.0283, 0.0074,\n",
      "        0.0265, 0.0520, 0.0077, 0.0164, 0.0066], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 190: BSNE_Loss=0.1355,local_loss: 0.13552440393771137,global_loss: 0.6723706722259521\n",
      "Q: tensor([0.0283, 0.0361, 0.0158, 0.0254, 0.0145, 0.0128, 0.0048, 0.0127, 0.0145,\n",
      "        0.0400, 0.0177, 0.0198, 0.0399, 0.0208, 0.0078, 0.0224, 0.0263, 0.0266,\n",
      "        0.0113, 0.0414, 0.0077, 0.0319, 0.0221, 0.0027, 0.0303, 0.0238, 0.0288,\n",
      "        0.0066, 0.0126, 0.0184, 0.0300, 0.0123, 0.0064, 0.0070, 0.0191, 0.0154,\n",
      "        0.0048, 0.0530, 0.0458, 0.0097, 0.0165, 0.0232, 0.0150, 0.0334, 0.0026,\n",
      "        0.0259, 0.0244, 0.0122, 0.0103, 0.0092], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 191: BSNE_Loss=0.1190,local_loss: 0.11898769176809512,global_loss: 0.2750171422958374\n",
      "Q: tensor([0.0411, 0.0041, 0.0473, 0.0204, 0.0173, 0.0316, 0.0139, 0.0185, 0.0089,\n",
      "        0.0330, 0.0155, 0.0087, 0.0650, 0.0281, 0.0230, 0.0116, 0.0054, 0.0113,\n",
      "        0.0138, 0.0137, 0.0299, 0.0200, 0.0089, 0.0103, 0.0199, 0.0358, 0.0214,\n",
      "        0.0092, 0.0107, 0.0241, 0.0393, 0.0090, 0.0078, 0.0428, 0.0076, 0.0106,\n",
      "        0.0120, 0.0085, 0.0268, 0.0210, 0.0104, 0.0167, 0.0107, 0.0446, 0.0393,\n",
      "        0.0071, 0.0210, 0.0176, 0.0088, 0.0160], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 192: BSNE_Loss=0.1094,local_loss: 0.10938019920964555,global_loss: 0.32218343019485474\n",
      "Q: tensor([0.0051, 0.0133, 0.0479, 0.0374, 0.0676, 0.0101, 0.0136, 0.0287, 0.0099,\n",
      "        0.0146, 0.0164, 0.0249, 0.0154, 0.0098, 0.0222, 0.0021, 0.0061, 0.0022,\n",
      "        0.0961, 0.0170, 0.0179, 0.0297, 0.0199, 0.0231, 0.0091, 0.0155, 0.0141,\n",
      "        0.0121, 0.0120, 0.0128, 0.0220, 0.0083, 0.0173, 0.0326, 0.0080, 0.0087,\n",
      "        0.0223, 0.0076, 0.0131, 0.0115, 0.0090, 0.0341, 0.0068, 0.0398, 0.0238,\n",
      "        0.0196, 0.0401, 0.0271, 0.0172, 0.0048], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 193: BSNE_Loss=0.1831,local_loss: 0.18313921983509,global_loss: 0.3777686655521393\n",
      "Q: tensor([0.0092, 0.0384, 0.0291, 0.0213, 0.0084, 0.0056, 0.0046, 0.0077, 0.0464,\n",
      "        0.0099, 0.0085, 0.0143, 0.0083, 0.0206, 0.0117, 0.0069, 0.0039, 0.0399,\n",
      "        0.0253, 0.0096, 0.0207, 0.0117, 0.0417, 0.0334, 0.0153, 0.0044, 0.0069,\n",
      "        0.0166, 0.0121, 0.0255, 0.0083, 0.0095, 0.0346, 0.0914, 0.0227, 0.0158,\n",
      "        0.0040, 0.0254, 0.0140, 0.0150, 0.0084, 0.0166, 0.0098, 0.0180, 0.0039,\n",
      "        0.0560, 0.0226, 0.0580, 0.0417, 0.0064], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 194: BSNE_Loss=0.2166,local_loss: 0.21663726248547654,global_loss: 0.23461730778217316\n",
      "Q: tensor([0.0311, 0.0134, 0.0116, 0.0213, 0.0045, 0.0035, 0.0125, 0.0078, 0.0212,\n",
      "        0.0331, 0.0280, 0.0085, 0.0071, 0.0025, 0.0150, 0.0144, 0.0211, 0.0117,\n",
      "        0.0278, 0.0409, 0.0166, 0.0191, 0.0228, 0.0103, 0.0214, 0.0055, 0.0113,\n",
      "        0.0224, 0.0529, 0.0242, 0.0253, 0.0149, 0.0029, 0.0198, 0.0102, 0.0271,\n",
      "        0.0406, 0.0229, 0.0502, 0.0413, 0.0053, 0.0089, 0.0470, 0.0162, 0.0598,\n",
      "        0.0073, 0.0241, 0.0058, 0.0138, 0.0128], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 3: BSNE_Loss=0.1696,local_loss: 0.1695660290962537,global_loss: 0.42778468132019043\n",
      "Q: tensor([0.0093, 0.0025, 0.0131, 0.0121, 0.0503, 0.0158, 0.0134, 0.0100, 0.0437,\n",
      "        0.0207, 0.0141, 0.0090, 0.0074, 0.0308, 0.0226, 0.0083, 0.0397, 0.0061,\n",
      "        0.0104, 0.0208, 0.0199, 0.0335, 0.0229, 0.0150, 0.0136, 0.0251, 0.0052,\n",
      "        0.0336, 0.0046, 0.0077, 0.0185, 0.0246, 0.0107, 0.0148, 0.0683, 0.0097,\n",
      "        0.0090, 0.0396, 0.0212, 0.0140, 0.0453, 0.0148, 0.0205, 0.0395, 0.0117,\n",
      "        0.0141, 0.0100, 0.0407, 0.0044, 0.0271], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 4: BSNE_Loss=0.1478,local_loss: 0.1477793313880396,global_loss: 0.33053168654441833\n",
      "Q: tensor([0.0237, 0.0104, 0.0212, 0.0262, 0.0106, 0.0048, 0.0273, 0.0214, 0.0205,\n",
      "        0.0152, 0.0044, 0.0074, 0.0079, 0.0565, 0.0189, 0.0239, 0.0191, 0.0165,\n",
      "        0.0675, 0.0084, 0.0067, 0.0119, 0.0176, 0.0097, 0.0218, 0.0140, 0.0221,\n",
      "        0.0213, 0.0176, 0.0155, 0.0217, 0.0257, 0.0326, 0.0081, 0.0204, 0.0172,\n",
      "        0.0199, 0.0189, 0.0186, 0.0070, 0.0349, 0.0157, 0.0178, 0.0124, 0.0625,\n",
      "        0.0219, 0.0143, 0.0284, 0.0281, 0.0038], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 5: BSNE_Loss=0.0961,local_loss: 0.09613367098880352,global_loss: 0.5104087591171265\n",
      "Q: tensor([0.0147, 0.0197, 0.0059, 0.0062, 0.0144, 0.0043, 0.0217, 0.0030, 0.0094,\n",
      "        0.0188, 0.0082, 0.0062, 0.0764, 0.0145, 0.0107, 0.0107, 0.0019, 0.0984,\n",
      "        0.0364, 0.0218, 0.0805, 0.0080, 0.0099, 0.0143, 0.0210, 0.0217, 0.0110,\n",
      "        0.0297, 0.0162, 0.0177, 0.0310, 0.0113, 0.0200, 0.0168, 0.0062, 0.0176,\n",
      "        0.0038, 0.0268, 0.0085, 0.0274, 0.0252, 0.0267, 0.0077, 0.0101, 0.0077,\n",
      "        0.0262, 0.0162, 0.0194, 0.0192, 0.0389], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 6: BSNE_Loss=0.2267,local_loss: 0.2267303894186435,global_loss: 0.15074145793914795\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Q: tensor([0.0121, 0.0044, 0.0057, 0.0044, 0.0040, 0.0093, 0.0119, 0.0183, 0.0320,\n",
      "        0.0069, 0.0146, 0.0185, 0.0117, 0.0451, 0.0104, 0.0307, 0.0134, 0.0268,\n",
      "        0.0301, 0.0313, 0.0101, 0.0350, 0.0113, 0.0285, 0.0568, 0.0270, 0.0180,\n",
      "        0.0072, 0.0118, 0.0128, 0.0383, 0.0126, 0.0161, 0.0184, 0.0121, 0.0174,\n",
      "        0.0246, 0.0145, 0.0078, 0.0356, 0.0196, 0.0139, 0.0117, 0.0134, 0.0312,\n",
      "        0.0046, 0.0298, 0.1004, 0.0019, 0.0159], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 7: BSNE_Loss=0.1808,local_loss: 0.18082022986743934,global_loss: 0.3880663216114044\n",
      "Q: tensor([0.0180, 0.0235, 0.0063, 0.0178, 0.0272, 0.0173, 0.0218, 0.0252, 0.0175,\n",
      "        0.0477, 0.0296, 0.0057, 0.0124, 0.0150, 0.0148, 0.0060, 0.0089, 0.0185,\n",
      "        0.0160, 0.0227, 0.0277, 0.0243, 0.0134, 0.0238, 0.0105, 0.0094, 0.0068,\n",
      "        0.0094, 0.0109, 0.0323, 0.0321, 0.0342, 0.0180, 0.0207, 0.0136, 0.0225,\n",
      "        0.0163, 0.0696, 0.0209, 0.0384, 0.0045, 0.0581, 0.0235, 0.0070, 0.0142,\n",
      "        0.0247, 0.0107, 0.0036, 0.0069, 0.0197], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 8: BSNE_Loss=0.1093,local_loss: 0.10925697380895283,global_loss: 0.06010894477367401\n",
      "Q: tensor([0.0159, 0.0054, 0.0193, 0.0072, 0.0368, 0.0095, 0.0242, 0.0164, 0.0115,\n",
      "        0.0266, 0.0123, 0.0121, 0.0160, 0.0162, 0.0048, 0.0072, 0.0458, 0.0191,\n",
      "        0.0131, 0.0385, 0.0167, 0.0079, 0.0104, 0.0191, 0.0185, 0.0166, 0.0499,\n",
      "        0.0219, 0.0424, 0.0106, 0.0416, 0.0146, 0.0225, 0.0051, 0.0142, 0.0333,\n",
      "        0.0183, 0.0266, 0.0492, 0.0066, 0.0051, 0.0116, 0.0309, 0.0065, 0.0037,\n",
      "        0.0488, 0.0170, 0.0115, 0.0167, 0.0440], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 9: BSNE_Loss=0.1358,local_loss: 0.13581009658778345,global_loss: 0.18666982650756836\n",
      "Q: tensor([0.0254, 0.0061, 0.0400, 0.0148, 0.0088, 0.0239, 0.0050, 0.0076, 0.0309,\n",
      "        0.0207, 0.0238, 0.0266, 0.0090, 0.0207, 0.0269, 0.0092, 0.0165, 0.0065,\n",
      "        0.0719, 0.0139, 0.0104, 0.0083, 0.0158, 0.0029, 0.0145, 0.0155, 0.0069,\n",
      "        0.0202, 0.0156, 0.0057, 0.0393, 0.0097, 0.0072, 0.0207, 0.0691, 0.0318,\n",
      "        0.0177, 0.0059, 0.0138, 0.0120, 0.0356, 0.0553, 0.0076, 0.0220, 0.0495,\n",
      "        0.0093, 0.0120, 0.0226, 0.0244, 0.0109], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 10: BSNE_Loss=0.1696,local_loss: 0.16958562804261682,global_loss: 0.07814397662878036\n",
      "Q: tensor([0.0153, 0.0125, 0.0135, 0.0351, 0.0197, 0.0144, 0.0138, 0.0160, 0.0319,\n",
      "        0.0126, 0.0072, 0.0065, 0.0503, 0.0088, 0.0316, 0.0161, 0.0285, 0.0059,\n",
      "        0.0426, 0.0151, 0.0772, 0.0074, 0.0049, 0.0270, 0.0330, 0.0101, 0.0110,\n",
      "        0.0290, 0.0124, 0.0064, 0.0962, 0.0023, 0.0141, 0.0082, 0.0218, 0.0100,\n",
      "        0.0267, 0.0052, 0.0178, 0.0137, 0.0220, 0.0231, 0.0085, 0.0137, 0.0235,\n",
      "        0.0317, 0.0155, 0.0060, 0.0031, 0.0210], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 11: BSNE_Loss=0.1994,local_loss: 0.1994463796507739,global_loss: -0.013682261109352112\n",
      "Q: tensor([0.0101, 0.0229, 0.0152, 0.0180, 0.0028, 0.0054, 0.0116, 0.0400, 0.0221,\n",
      "        0.0193, 0.0215, 0.0311, 0.0133, 0.0191, 0.0111, 0.0265, 0.0503, 0.0090,\n",
      "        0.0198, 0.0089, 0.0152, 0.0028, 0.0107, 0.0067, 0.0951, 0.0049, 0.0032,\n",
      "        0.0213, 0.0058, 0.0158, 0.0291, 0.0325, 0.0116, 0.0537, 0.0097, 0.0071,\n",
      "        0.0081, 0.0074, 0.0082, 0.0156, 0.0560, 0.0373, 0.0203, 0.0376, 0.0070,\n",
      "        0.0162, 0.0345, 0.0138, 0.0134, 0.0215], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 12: BSNE_Loss=0.2096,local_loss: 0.2095800516556302,global_loss: -0.006677880883216858\n",
      "Q: tensor([0.0087, 0.0381, 0.0121, 0.0228, 0.0397, 0.0106, 0.0043, 0.0081, 0.0310,\n",
      "        0.0183, 0.0118, 0.0106, 0.0169, 0.0216, 0.0165, 0.0400, 0.0268, 0.0386,\n",
      "        0.0039, 0.0064, 0.0187, 0.0409, 0.0130, 0.0082, 0.0066, 0.0168, 0.0509,\n",
      "        0.0095, 0.0160, 0.0354, 0.0076, 0.0361, 0.0078, 0.0193, 0.0230, 0.0182,\n",
      "        0.0112, 0.0129, 0.0204, 0.0149, 0.0226, 0.0295, 0.0395, 0.0051, 0.0364,\n",
      "        0.0224, 0.0215, 0.0123, 0.0106, 0.0257], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 13: BSNE_Loss=0.1056,local_loss: 0.10557415368327294,global_loss: -0.016715032979846\n",
      "Q: tensor([0.0149, 0.0399, 0.0246, 0.0105, 0.0059, 0.0138, 0.0277, 0.0265, 0.0073,\n",
      "        0.0215, 0.0491, 0.0120, 0.0235, 0.0084, 0.0139, 0.0087, 0.0282, 0.0409,\n",
      "        0.0061, 0.0178, 0.0042, 0.0260, 0.0127, 0.0052, 0.0299, 0.0519, 0.0183,\n",
      "        0.0094, 0.0060, 0.0134, 0.0080, 0.0211, 0.0540, 0.0081, 0.0276, 0.0485,\n",
      "        0.0152, 0.0260, 0.0026, 0.0056, 0.0413, 0.0325, 0.0166, 0.0048, 0.0383,\n",
      "        0.0145, 0.0117, 0.0199, 0.0113, 0.0139], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 14: BSNE_Loss=0.1634,local_loss: 0.16342961470468173,global_loss: 0.2494201511144638\n",
      "Q: tensor([0.0385, 0.0100, 0.0060, 0.0084, 0.0098, 0.0284, 0.0176, 0.0256, 0.0234,\n",
      "        0.0298, 0.0428, 0.0037, 0.0075, 0.0080, 0.0427, 0.0345, 0.0159, 0.0180,\n",
      "        0.0321, 0.0100, 0.0177, 0.0052, 0.0153, 0.0054, 0.0102, 0.0102, 0.0199,\n",
      "        0.0333, 0.0149, 0.0043, 0.0285, 0.0069, 0.0175, 0.0123, 0.0248, 0.0060,\n",
      "        0.0055, 0.0161, 0.0159, 0.0265, 0.0231, 0.0283, 0.0578, 0.0289, 0.0286,\n",
      "        0.0065, 0.0177, 0.0219, 0.0368, 0.0412], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 15: BSNE_Loss=0.1349,local_loss: 0.13491640509420488,global_loss: 0.1411779522895813\n",
      "Q: tensor([0.0085, 0.0382, 0.0264, 0.0459, 0.0119, 0.0118, 0.0041, 0.0274, 0.0110,\n",
      "        0.0226, 0.0209, 0.0098, 0.0487, 0.0110, 0.0041, 0.0023, 0.0070, 0.0079,\n",
      "        0.0185, 0.0301, 0.0145, 0.0183, 0.0136, 0.0147, 0.0234, 0.0370, 0.0032,\n",
      "        0.0144, 0.0201, 0.0054, 0.0085, 0.0104, 0.0096, 0.0201, 0.0886, 0.0142,\n",
      "        0.0237, 0.0143, 0.0205, 0.0555, 0.0112, 0.0298, 0.0130, 0.0167, 0.0520,\n",
      "        0.0264, 0.0095, 0.0043, 0.0287, 0.0105], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 16: BSNE_Loss=0.1929,local_loss: 0.1928614924838378,global_loss: 0.03662818297743797\n",
      "Q: tensor([0.0179, 0.0214, 0.0062, 0.0020, 0.0167, 0.0075, 0.0157, 0.0171, 0.0074,\n",
      "        0.0115, 0.0182, 0.0269, 0.0218, 0.0175, 0.0273, 0.0187, 0.0125, 0.0287,\n",
      "        0.0097, 0.0123, 0.0195, 0.0742, 0.0176, 0.0494, 0.0265, 0.0150, 0.0074,\n",
      "        0.0276, 0.0191, 0.0519, 0.0440, 0.0205, 0.0124, 0.0236, 0.0213, 0.0162,\n",
      "        0.0446, 0.0199, 0.0139, 0.0072, 0.0429, 0.0083, 0.0285, 0.0098, 0.0104,\n",
      "        0.0138, 0.0063, 0.0053, 0.0155, 0.0100], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 17: BSNE_Loss=0.1250,local_loss: 0.12497638293559714,global_loss: 0.29345816373825073\n",
      "Q: tensor([0.0253, 0.0205, 0.0343, 0.0051, 0.0312, 0.0204, 0.0046, 0.0603, 0.0907,\n",
      "        0.0070, 0.0223, 0.0149, 0.0101, 0.0129, 0.0544, 0.0126, 0.0230, 0.0216,\n",
      "        0.0111, 0.0147, 0.0037, 0.0061, 0.0079, 0.0249, 0.0047, 0.0335, 0.0235,\n",
      "        0.0242, 0.0511, 0.0120, 0.0468, 0.0434, 0.0158, 0.0045, 0.0127, 0.0109,\n",
      "        0.0107, 0.0190, 0.0073, 0.0160, 0.0062, 0.0118, 0.0104, 0.0328, 0.0103,\n",
      "        0.0054, 0.0046, 0.0091, 0.0163, 0.0177], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 18: BSNE_Loss=0.2112,local_loss: 0.2111536796381121,global_loss: -0.004850449506193399\n",
      "Q: tensor([0.0554, 0.0142, 0.0229, 0.0080, 0.0189, 0.0162, 0.0132, 0.0123, 0.0195,\n",
      "        0.0082, 0.0211, 0.0218, 0.0071, 0.0047, 0.0069, 0.0226, 0.0096, 0.0113,\n",
      "        0.0087, 0.0220, 0.0315, 0.0370, 0.0420, 0.0145, 0.0088, 0.0228, 0.0224,\n",
      "        0.0066, 0.0137, 0.0297, 0.0177, 0.0124, 0.0134, 0.0093, 0.0243, 0.0365,\n",
      "        0.0539, 0.0145, 0.0302, 0.0324, 0.0637, 0.0288, 0.0140, 0.0126, 0.0216,\n",
      "        0.0073, 0.0131, 0.0111, 0.0091, 0.0202], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 19: BSNE_Loss=0.0990,local_loss: 0.09904441999280396,global_loss: 0.21109038591384888\n",
      "Q: tensor([0.0057, 0.0244, 0.0046, 0.0151, 0.0264, 0.0055, 0.0088, 0.0391, 0.0429,\n",
      "        0.0144, 0.0176, 0.0352, 0.0113, 0.0317, 0.0439, 0.0203, 0.0089, 0.0225,\n",
      "        0.0086, 0.0377, 0.0053, 0.0121, 0.0043, 0.0102, 0.0183, 0.0183, 0.0116,\n",
      "        0.0159, 0.0172, 0.0139, 0.0165, 0.0724, 0.0236, 0.0048, 0.0026, 0.0083,\n",
      "        0.0295, 0.0139, 0.0072, 0.0190, 0.0231, 0.0159, 0.0158, 0.0202, 0.0114,\n",
      "        0.0116, 0.0124, 0.0648, 0.0655, 0.0099], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 20: BSNE_Loss=0.1810,local_loss: 0.18099804822992469,global_loss: 0.10989788174629211\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Q: tensor([0.0221, 0.0264, 0.0267, 0.0085, 0.0137, 0.0093, 0.0373, 0.0055, 0.0299,\n",
      "        0.0142, 0.0072, 0.0118, 0.0136, 0.0307, 0.0169, 0.0317, 0.0172, 0.0269,\n",
      "        0.0222, 0.0125, 0.0076, 0.0450, 0.0133, 0.0049, 0.0215, 0.0411, 0.0249,\n",
      "        0.0039, 0.0111, 0.0212, 0.0104, 0.0079, 0.0273, 0.0156, 0.0103, 0.0049,\n",
      "        0.0068, 0.0288, 0.0490, 0.0658, 0.0048, 0.0371, 0.0315, 0.0311, 0.0268,\n",
      "        0.0161, 0.0144, 0.0106, 0.0134, 0.0087], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 21: BSNE_Loss=0.1322,local_loss: 0.13215076331886993,global_loss: -0.02838364988565445\n",
      "Q: tensor([0.0114, 0.0160, 0.0148, 0.0233, 0.0366, 0.0048, 0.0068, 0.0109, 0.0271,\n",
      "        0.0092, 0.0218, 0.0082, 0.0351, 0.0088, 0.0128, 0.0147, 0.1762, 0.0271,\n",
      "        0.0075, 0.0452, 0.0129, 0.0112, 0.0077, 0.0051, 0.0169, 0.0209, 0.0273,\n",
      "        0.0075, 0.0075, 0.0164, 0.0571, 0.0131, 0.0182, 0.0056, 0.0234, 0.0184,\n",
      "        0.0248, 0.0124, 0.0194, 0.0224, 0.0060, 0.0358, 0.0128, 0.0164, 0.0173,\n",
      "        0.0138, 0.0024, 0.0097, 0.0096, 0.0096], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 22: BSNE_Loss=0.2302,local_loss: 0.23020206854897718,global_loss: -0.12660261988639832\n",
      "Q: tensor([0.0151, 0.0112, 0.0190, 0.0666, 0.0050, 0.0114, 0.0169, 0.0135, 0.0053,\n",
      "        0.0179, 0.0132, 0.0121, 0.0037, 0.0265, 0.0056, 0.0087, 0.0742, 0.0460,\n",
      "        0.0110, 0.0229, 0.0422, 0.0171, 0.0227, 0.0088, 0.0119, 0.0121, 0.0272,\n",
      "        0.0180, 0.0548, 0.0084, 0.0099, 0.0127, 0.0124, 0.0102, 0.0143, 0.0183,\n",
      "        0.0263, 0.0271, 0.0289, 0.0086, 0.0107, 0.0279, 0.0200, 0.0060, 0.0324,\n",
      "        0.0185, 0.0430, 0.0089, 0.0143, 0.0207], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 23: BSNE_Loss=0.1426,local_loss: 0.14258582371638967,global_loss: 0.1923009157180786\n",
      "Q: tensor([0.0104, 0.0420, 0.0251, 0.0096, 0.0100, 0.0165, 0.0310, 0.0229, 0.0287,\n",
      "        0.0378, 0.0206, 0.0250, 0.0182, 0.0107, 0.0143, 0.0160, 0.0144, 0.0050,\n",
      "        0.0239, 0.0229, 0.0123, 0.0064, 0.0166, 0.0202, 0.0320, 0.0119, 0.0354,\n",
      "        0.0121, 0.0076, 0.0329, 0.0102, 0.0548, 0.0173, 0.0242, 0.0117, 0.0290,\n",
      "        0.0396, 0.0086, 0.0064, 0.0185, 0.0825, 0.0200, 0.0141, 0.0035, 0.0154,\n",
      "        0.0163, 0.0051, 0.0117, 0.0088, 0.0097], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 24: BSNE_Loss=0.1198,local_loss: 0.11976420800572694,global_loss: 0.08339793235063553\n",
      "Q: tensor([0.0252, 0.0074, 0.0409, 0.0232, 0.0271, 0.0217, 0.0174, 0.0125, 0.0483,\n",
      "        0.0135, 0.0301, 0.0195, 0.0143, 0.0302, 0.0118, 0.0056, 0.0184, 0.0255,\n",
      "        0.0245, 0.0433, 0.0164, 0.0120, 0.0196, 0.0471, 0.0079, 0.0182, 0.0279,\n",
      "        0.0413, 0.0086, 0.0195, 0.0130, 0.0203, 0.0159, 0.0124, 0.0238, 0.0098,\n",
      "        0.0326, 0.0049, 0.0256, 0.0119, 0.0180, 0.0116, 0.0283, 0.0061, 0.0086,\n",
      "        0.0115, 0.0116, 0.0161, 0.0118, 0.0275], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 25: BSNE_Loss=0.0623,local_loss: 0.062337177025564613,global_loss: -0.005796750541776419\n",
      "Q: tensor([0.0096, 0.0258, 0.0249, 0.0145, 0.0028, 0.0099, 0.0080, 0.0265, 0.0117,\n",
      "        0.0134, 0.0055, 0.0111, 0.0056, 0.0028, 0.0109, 0.0050, 0.0307, 0.0280,\n",
      "        0.0084, 0.0692, 0.0282, 0.0048, 0.0074, 0.0107, 0.0078, 0.0277, 0.0317,\n",
      "        0.1301, 0.0155, 0.0044, 0.0079, 0.0275, 0.0704, 0.0152, 0.0307, 0.0176,\n",
      "        0.0135, 0.0391, 0.0209, 0.0163, 0.0019, 0.0365, 0.0235, 0.0083, 0.0172,\n",
      "        0.0143, 0.0244, 0.0052, 0.0093, 0.0079], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 26: BSNE_Loss=0.2887,local_loss: 0.28874546176640314,global_loss: 0.278555303812027\n",
      "Q: tensor([0.0113, 0.0215, 0.0043, 0.0107, 0.0125, 0.0265, 0.0037, 0.0162, 0.0286,\n",
      "        0.0469, 0.0041, 0.0101, 0.0113, 0.0182, 0.0340, 0.0578, 0.0065, 0.0140,\n",
      "        0.0153, 0.0391, 0.0111, 0.0185, 0.0028, 0.0489, 0.0149, 0.0263, 0.0107,\n",
      "        0.0344, 0.0402, 0.0195, 0.0147, 0.0122, 0.0156, 0.0103, 0.0075, 0.0174,\n",
      "        0.0327, 0.0122, 0.0205, 0.0267, 0.0011, 0.0364, 0.0114, 0.0211, 0.0327,\n",
      "        0.0160, 0.0204, 0.0298, 0.0131, 0.0285], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 27: BSNE_Loss=0.1540,local_loss: 0.15399146196476654,global_loss: 0.14780041575431824\n",
      "Q: tensor([0.0127, 0.0390, 0.0024, 0.0038, 0.0059, 0.0066, 0.0090, 0.0048, 0.1804,\n",
      "        0.0179, 0.0102, 0.0172, 0.0065, 0.0193, 0.0074, 0.0224, 0.0124, 0.0069,\n",
      "        0.0070, 0.0154, 0.0029, 0.0360, 0.0094, 0.0240, 0.0174, 0.0112, 0.0248,\n",
      "        0.0132, 0.0204, 0.0149, 0.0194, 0.0678, 0.0277, 0.0057, 0.0170, 0.0125,\n",
      "        0.0268, 0.0050, 0.0283, 0.0204, 0.0149, 0.0142, 0.0343, 0.0029, 0.0364,\n",
      "        0.0211, 0.0039, 0.0208, 0.0271, 0.0128], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 28: BSNE_Loss=0.2990,local_loss: 0.2990201246857292,global_loss: 0.22726911306381226\n",
      "Q: tensor([0.0349, 0.0111, 0.0212, 0.0149, 0.0088, 0.0129, 0.0287, 0.0331, 0.0232,\n",
      "        0.0099, 0.0114, 0.0065, 0.0230, 0.0028, 0.0121, 0.0188, 0.0105, 0.0190,\n",
      "        0.0313, 0.0560, 0.0139, 0.0319, 0.0033, 0.0186, 0.0094, 0.0222, 0.0281,\n",
      "        0.0409, 0.0078, 0.0209, 0.0293, 0.0394, 0.0186, 0.0078, 0.0301, 0.0079,\n",
      "        0.0051, 0.0218, 0.0397, 0.0100, 0.0299, 0.0465, 0.0222, 0.0040, 0.0236,\n",
      "        0.0026, 0.0327, 0.0272, 0.0062, 0.0084], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 29: BSNE_Loss=0.1555,local_loss: 0.15546516859108997,global_loss: 0.08834869414567947\n",
      "Q: tensor([0.0415, 0.0134, 0.0179, 0.0050, 0.0198, 0.0120, 0.0061, 0.0074, 0.0658,\n",
      "        0.0080, 0.0071, 0.0149, 0.0107, 0.0229, 0.0112, 0.0064, 0.0175, 0.0169,\n",
      "        0.0457, 0.0412, 0.0271, 0.0271, 0.0187, 0.0182, 0.0495, 0.0092, 0.0064,\n",
      "        0.0415, 0.0097, 0.0047, 0.0648, 0.0217, 0.0056, 0.0194, 0.0243, 0.0067,\n",
      "        0.0066, 0.0159, 0.0046, 0.0135, 0.0170, 0.0206, 0.0346, 0.0050, 0.0107,\n",
      "        0.0274, 0.0271, 0.0135, 0.0467, 0.0107], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 30: BSNE_Loss=0.1809,local_loss: 0.1809188566479848,global_loss: 0.13398444652557373\n",
      "Q: tensor([0.0169, 0.0231, 0.0036, 0.0372, 0.0054, 0.0155, 0.0295, 0.0223, 0.0296,\n",
      "        0.0077, 0.0211, 0.0151, 0.0087, 0.0085, 0.0088, 0.0248, 0.0177, 0.0785,\n",
      "        0.0225, 0.0234, 0.0194, 0.0339, 0.0102, 0.0188, 0.0077, 0.0194, 0.0103,\n",
      "        0.0023, 0.0269, 0.0246, 0.0127, 0.0116, 0.0130, 0.0121, 0.0122, 0.0255,\n",
      "        0.0093, 0.0261, 0.0109, 0.0083, 0.0187, 0.0559, 0.0142, 0.0104, 0.0489,\n",
      "        0.0162, 0.0059, 0.0624, 0.0252, 0.0070], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 31: BSNE_Loss=0.1514,local_loss: 0.15140036373436835,global_loss: -0.09105512499809265\n",
      "Q: tensor([0.0108, 0.0130, 0.0261, 0.0083, 0.0095, 0.0098, 0.0295, 0.0158, 0.0077,\n",
      "        0.0335, 0.0051, 0.0087, 0.0083, 0.0186, 0.0170, 0.0040, 0.0306, 0.0315,\n",
      "        0.0140, 0.0251, 0.0239, 0.0187, 0.0130, 0.0096, 0.0135, 0.0055, 0.0215,\n",
      "        0.0328, 0.0275, 0.0138, 0.0308, 0.0288, 0.0632, 0.0360, 0.0606, 0.0151,\n",
      "        0.0098, 0.0093, 0.0320, 0.0114, 0.0462, 0.0384, 0.0219, 0.0055, 0.0459,\n",
      "        0.0049, 0.0093, 0.0092, 0.0092, 0.0059], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 32: BSNE_Loss=0.1538,local_loss: 0.15379749266176637,global_loss: 0.1739412397146225\n",
      "Q: tensor([0.0100, 0.0104, 0.0100, 0.0437, 0.0429, 0.0100, 0.0041, 0.0144, 0.0352,\n",
      "        0.0221, 0.0060, 0.0141, 0.0292, 0.0095, 0.0050, 0.0036, 0.0095, 0.0577,\n",
      "        0.0044, 0.0318, 0.0058, 0.0040, 0.0211, 0.0163, 0.0250, 0.0153, 0.0110,\n",
      "        0.0092, 0.0136, 0.0464, 0.0111, 0.0058, 0.0153, 0.0137, 0.0042, 0.0598,\n",
      "        0.0587, 0.0132, 0.0215, 0.0131, 0.0108, 0.0163, 0.0193, 0.0297, 0.0784,\n",
      "        0.0050, 0.0060, 0.0423, 0.0158, 0.0187], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 33: BSNE_Loss=0.2373,local_loss: 0.2372948169543988,global_loss: 0.2793843448162079\n",
      "Q: tensor([0.0156, 0.0060, 0.0077, 0.0067, 0.0387, 0.0201, 0.0069, 0.0035, 0.0044,\n",
      "        0.0138, 0.0047, 0.0146, 0.0102, 0.0142, 0.0063, 0.0751, 0.0446, 0.0154,\n",
      "        0.0678, 0.0273, 0.0105, 0.0481, 0.0149, 0.0314, 0.0077, 0.0296, 0.0060,\n",
      "        0.0242, 0.0130, 0.0236, 0.0270, 0.0119, 0.0091, 0.0419, 0.0185, 0.0046,\n",
      "        0.0155, 0.0067, 0.0084, 0.0126, 0.0276, 0.0143, 0.0653, 0.0566, 0.0187,\n",
      "        0.0148, 0.0070, 0.0064, 0.0048, 0.0158], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 34: BSNE_Loss=0.2374,local_loss: 0.2373924068494791,global_loss: 0.17354856431484222\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Q: tensor([0.0145, 0.0104, 0.0192, 0.0176, 0.0148, 0.0211, 0.0058, 0.0338, 0.0151,\n",
      "        0.0050, 0.0082, 0.0120, 0.0115, 0.0286, 0.0136, 0.0161, 0.0342, 0.0370,\n",
      "        0.0077, 0.0344, 0.0031, 0.0110, 0.0542, 0.0122, 0.0192, 0.0187, 0.0056,\n",
      "        0.0439, 0.0440, 0.0261, 0.0104, 0.0080, 0.0434, 0.0035, 0.0366, 0.0473,\n",
      "        0.0049, 0.0281, 0.0096, 0.0090, 0.0129, 0.0978, 0.0143, 0.0198, 0.0080,\n",
      "        0.0181, 0.0037, 0.0116, 0.0109, 0.0035], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 35: BSNE_Loss=0.2204,local_loss: 0.22035076093148973,global_loss: 0.11812640726566315\n",
      "Q: tensor([0.0197, 0.0067, 0.0216, 0.0319, 0.0228, 0.0026, 0.0324, 0.0065, 0.0211,\n",
      "        0.0154, 0.0437, 0.0210, 0.0136, 0.0226, 0.0392, 0.0092, 0.0177, 0.0774,\n",
      "        0.0204, 0.0071, 0.0281, 0.0102, 0.0236, 0.0268, 0.0051, 0.0192, 0.0182,\n",
      "        0.0127, 0.0124, 0.0204, 0.0048, 0.0355, 0.0087, 0.0081, 0.0147, 0.0282,\n",
      "        0.0263, 0.0108, 0.0053, 0.0093, 0.0272, 0.0312, 0.0341, 0.0279, 0.0056,\n",
      "        0.0111, 0.0265, 0.0276, 0.0103, 0.0174], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 36: BSNE_Loss=0.1247,local_loss: 0.12465602807401048,global_loss: 0.2601238787174225\n",
      "Q: tensor([0.0412, 0.0143, 0.0097, 0.0245, 0.0083, 0.0196, 0.0039, 0.0146, 0.0206,\n",
      "        0.0204, 0.0047, 0.0320, 0.0158, 0.0052, 0.0093, 0.0118, 0.0147, 0.0923,\n",
      "        0.0075, 0.0337, 0.0371, 0.0116, 0.0158, 0.0097, 0.0081, 0.0108, 0.0052,\n",
      "        0.0111, 0.0184, 0.0049, 0.0512, 0.0125, 0.0128, 0.0129, 0.0314, 0.0229,\n",
      "        0.0260, 0.0832, 0.0121, 0.0327, 0.0058, 0.0121, 0.0196, 0.0226, 0.0107,\n",
      "        0.0039, 0.0322, 0.0205, 0.0237, 0.0146], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 37: BSNE_Loss=0.1884,local_loss: 0.1884430710666348,global_loss: 0.1337372213602066\n",
      "Q: tensor([0.0120, 0.0211, 0.0257, 0.0150, 0.0122, 0.0085, 0.0102, 0.0027, 0.0098,\n",
      "        0.0239, 0.0025, 0.0084, 0.1069, 0.0067, 0.0072, 0.0235, 0.0098, 0.0059,\n",
      "        0.0094, 0.0090, 0.0404, 0.0287, 0.0047, 0.0131, 0.0149, 0.0267, 0.0047,\n",
      "        0.0095, 0.0208, 0.0043, 0.0085, 0.0094, 0.0054, 0.1169, 0.0096, 0.0333,\n",
      "        0.0463, 0.0051, 0.0263, 0.0092, 0.0091, 0.0253, 0.0171, 0.0121, 0.0808,\n",
      "        0.0126, 0.0067, 0.0225, 0.0051, 0.0408], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 38: BSNE_Loss=0.3296,local_loss: 0.32955359905351855,global_loss: 0.16240294277668\n",
      "Q: tensor([0.0037, 0.0204, 0.0105, 0.0078, 0.0241, 0.0035, 0.0203, 0.0160, 0.0107,\n",
      "        0.0207, 0.0177, 0.0148, 0.0133, 0.0119, 0.0104, 0.0528, 0.0093, 0.0553,\n",
      "        0.0176, 0.0674, 0.0247, 0.0066, 0.0122, 0.0046, 0.0031, 0.0019, 0.0142,\n",
      "        0.0254, 0.0235, 0.0450, 0.0209, 0.0258, 0.0341, 0.0093, 0.0159, 0.0184,\n",
      "        0.0095, 0.0130, 0.0502, 0.0075, 0.0161, 0.0170, 0.0375, 0.0324, 0.0502,\n",
      "        0.0358, 0.0048, 0.0139, 0.0094, 0.0089], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 39: BSNE_Loss=0.1999,local_loss: 0.19988126818274168,global_loss: 0.05951740965247154\n",
      "Q: tensor([0.0155, 0.0337, 0.0221, 0.0088, 0.0241, 0.0083, 0.0200, 0.0143, 0.0448,\n",
      "        0.0183, 0.0266, 0.0168, 0.0111, 0.0195, 0.0183, 0.0046, 0.0128, 0.0315,\n",
      "        0.0214, 0.0079, 0.0118, 0.0094, 0.0229, 0.0049, 0.0069, 0.0117, 0.0748,\n",
      "        0.0452, 0.0343, 0.0191, 0.0168, 0.0077, 0.0395, 0.0190, 0.0116, 0.0443,\n",
      "        0.0384, 0.0051, 0.0071, 0.0345, 0.0110, 0.0115, 0.0120, 0.0180, 0.0074,\n",
      "        0.0186, 0.0043, 0.0229, 0.0297, 0.0190], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 40: BSNE_Loss=0.1278,local_loss: 0.12782883189435182,global_loss: 0.1301804631948471\n",
      "Q: tensor([0.0176, 0.0036, 0.0097, 0.0116, 0.0235, 0.0187, 0.0336, 0.0249, 0.0694,\n",
      "        0.0070, 0.0048, 0.0174, 0.0196, 0.0184, 0.0177, 0.0519, 0.0331, 0.0122,\n",
      "        0.0130, 0.0074, 0.0143, 0.0282, 0.0078, 0.0107, 0.0100, 0.0731, 0.0163,\n",
      "        0.0074, 0.0104, 0.0215, 0.0084, 0.0093, 0.0094, 0.0352, 0.0387, 0.0195,\n",
      "        0.0223, 0.0025, 0.0228, 0.0136, 0.0130, 0.0536, 0.0163, 0.0192, 0.0141,\n",
      "        0.0033, 0.0245, 0.0344, 0.0047, 0.0201], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 41: BSNE_Loss=0.1767,local_loss: 0.1767193614442448,global_loss: 0.4138122498989105\n",
      "Q: tensor([0.0486, 0.0168, 0.0118, 0.0357, 0.0043, 0.0136, 0.0161, 0.0178, 0.0575,\n",
      "        0.0690, 0.0105, 0.0152, 0.0226, 0.0044, 0.0143, 0.0139, 0.0078, 0.0266,\n",
      "        0.0109, 0.0171, 0.0147, 0.0083, 0.0205, 0.0236, 0.0587, 0.0177, 0.0342,\n",
      "        0.0059, 0.0138, 0.0065, 0.0242, 0.0488, 0.0263, 0.0103, 0.0128, 0.0104,\n",
      "        0.0255, 0.0104, 0.0251, 0.0164, 0.0134, 0.0219, 0.0281, 0.0130, 0.0066,\n",
      "        0.0294, 0.0101, 0.0166, 0.0021, 0.0100], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 42: BSNE_Loss=0.1479,local_loss: 0.1479228457031736,global_loss: 0.2589370608329773\n",
      "Q: tensor([0.0247, 0.0442, 0.0144, 0.0095, 0.0151, 0.0523, 0.0220, 0.0121, 0.0429,\n",
      "        0.0069, 0.0141, 0.0049, 0.0623, 0.0449, 0.0026, 0.0171, 0.0886, 0.0083,\n",
      "        0.0219, 0.0027, 0.0152, 0.0089, 0.0118, 0.0166, 0.0388, 0.0301, 0.0036,\n",
      "        0.0151, 0.0130, 0.0137, 0.0503, 0.0217, 0.0543, 0.0090, 0.0089, 0.0133,\n",
      "        0.0170, 0.0051, 0.0123, 0.0363, 0.0069, 0.0106, 0.0063, 0.0190, 0.0070,\n",
      "        0.0091, 0.0044, 0.0158, 0.0036, 0.0104], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 43: BSNE_Loss=0.2616,local_loss: 0.2615622566852815,global_loss: 0.08521472662687302\n",
      "Q: tensor([0.0086, 0.0284, 0.0459, 0.0459, 0.0061, 0.0111, 0.0238, 0.0105, 0.0052,\n",
      "        0.0127, 0.0069, 0.0291, 0.0212, 0.0079, 0.0104, 0.0504, 0.0350, 0.0331,\n",
      "        0.0241, 0.0262, 0.0420, 0.0053, 0.0133, 0.0282, 0.0270, 0.0066, 0.0138,\n",
      "        0.0329, 0.0185, 0.0040, 0.0046, 0.0145, 0.0045, 0.0077, 0.0163, 0.0030,\n",
      "        0.0132, 0.0221, 0.1017, 0.0375, 0.0109, 0.0107, 0.0055, 0.0194, 0.0149,\n",
      "        0.0058, 0.0218, 0.0143, 0.0134, 0.0241], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 44: BSNE_Loss=0.2090,local_loss: 0.2090330505184592,global_loss: 0.32068490982055664\n",
      "Q: tensor([0.0122, 0.0172, 0.0083, 0.0226, 0.0131, 0.0116, 0.0040, 0.0191, 0.0160,\n",
      "        0.0111, 0.0134, 0.0173, 0.0106, 0.0179, 0.0174, 0.0178, 0.0194, 0.0109,\n",
      "        0.0178, 0.0272, 0.0523, 0.0124, 0.0143, 0.0119, 0.0529, 0.0121, 0.0091,\n",
      "        0.0159, 0.0105, 0.0271, 0.0103, 0.0042, 0.0165, 0.0078, 0.0729, 0.0095,\n",
      "        0.0147, 0.0197, 0.0485, 0.0575, 0.0306, 0.0184, 0.0249, 0.0092, 0.0117,\n",
      "        0.0550, 0.0047, 0.0069, 0.0380, 0.0156], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 45: BSNE_Loss=0.1397,local_loss: 0.13971858302278375,global_loss: 0.19242198765277863\n",
      "Q: tensor([0.0105, 0.0156, 0.0564, 0.0210, 0.0077, 0.0088, 0.0189, 0.0198, 0.0145,\n",
      "        0.0038, 0.0340, 0.0344, 0.0210, 0.0054, 0.0060, 0.0319, 0.0181, 0.0098,\n",
      "        0.0050, 0.0150, 0.0149, 0.0168, 0.0438, 0.0109, 0.0116, 0.0075, 0.0220,\n",
      "        0.0125, 0.0205, 0.0103, 0.0120, 0.0111, 0.0359, 0.0087, 0.0993, 0.0058,\n",
      "        0.0361, 0.0547, 0.0063, 0.0576, 0.0171, 0.0088, 0.0155, 0.0086, 0.0404,\n",
      "        0.0174, 0.0115, 0.0058, 0.0147, 0.0046], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 46: BSNE_Loss=0.2059,local_loss: 0.2058840625816687,global_loss: 0.3039233088493347\n",
      "Q: tensor([0.0618, 0.0070, 0.0124, 0.0086, 0.0276, 0.0070, 0.0186, 0.0086, 0.0473,\n",
      "        0.0144, 0.0172, 0.0171, 0.0443, 0.0035, 0.0175, 0.0076, 0.0098, 0.0444,\n",
      "        0.0109, 0.0079, 0.0056, 0.0114, 0.0183, 0.0044, 0.0341, 0.0153, 0.0089,\n",
      "        0.0218, 0.0327, 0.0126, 0.0074, 0.0217, 0.0055, 0.0565, 0.0496, 0.0123,\n",
      "        0.0259, 0.0362, 0.0155, 0.0235, 0.0217, 0.0067, 0.0143, 0.0217, 0.0320,\n",
      "        0.0068, 0.0090, 0.0182, 0.0258, 0.0311], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 47: BSNE_Loss=0.1592,local_loss: 0.15924284587208357,global_loss: -0.003782861866056919\n",
      "Q: tensor([0.0158, 0.0121, 0.0120, 0.0432, 0.0121, 0.0107, 0.0255, 0.0846, 0.0348,\n",
      "        0.0220, 0.0053, 0.0092, 0.0143, 0.0112, 0.0186, 0.0039, 0.0187, 0.0075,\n",
      "        0.0077, 0.0238, 0.0476, 0.0366, 0.0228, 0.0181, 0.0168, 0.0145, 0.0270,\n",
      "        0.0037, 0.0145, 0.0270, 0.0209, 0.0108, 0.0350, 0.0057, 0.0354, 0.0347,\n",
      "        0.0075, 0.0299, 0.0300, 0.0329, 0.0178, 0.0198, 0.0051, 0.0102, 0.0221,\n",
      "        0.0094, 0.0044, 0.0218, 0.0207, 0.0042], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 48: BSNE_Loss=0.1534,local_loss: 0.15342911731049397,global_loss: 0.4833351969718933\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Q: tensor([0.0385, 0.0212, 0.0569, 0.0390, 0.0268, 0.0096, 0.0160, 0.0313, 0.0147,\n",
      "        0.0187, 0.0128, 0.0173, 0.0132, 0.0040, 0.0024, 0.0109, 0.0298, 0.0223,\n",
      "        0.0106, 0.0352, 0.0107, 0.0174, 0.0251, 0.0116, 0.0369, 0.0392, 0.0227,\n",
      "        0.0129, 0.0143, 0.0052, 0.0178, 0.0208, 0.0069, 0.0146, 0.0265, 0.0439,\n",
      "        0.0048, 0.0202, 0.0068, 0.0082, 0.0162, 0.0198, 0.0380, 0.0328, 0.0143,\n",
      "        0.0037, 0.0312, 0.0277, 0.0059, 0.0122], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 49: BSNE_Loss=0.1300,local_loss: 0.13000395172137924,global_loss: 0.3395054042339325\n",
      "Q: tensor([0.0421, 0.0230, 0.0083, 0.0159, 0.0079, 0.0365, 0.0305, 0.0100, 0.0097,\n",
      "        0.0610, 0.0183, 0.0096, 0.0199, 0.0161, 0.0117, 0.0646, 0.0117, 0.0290,\n",
      "        0.0054, 0.0129, 0.0204, 0.0066, 0.0093, 0.0218, 0.0106, 0.0059, 0.0407,\n",
      "        0.0118, 0.0097, 0.0268, 0.0226, 0.0219, 0.0318, 0.0024, 0.0117, 0.0199,\n",
      "        0.0232, 0.0098, 0.0189, 0.0502, 0.0049, 0.0216, 0.0275, 0.0199, 0.0189,\n",
      "        0.0152, 0.0067, 0.0217, 0.0239, 0.0195], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 50: BSNE_Loss=0.1272,local_loss: 0.12721816205247988,global_loss: 0.08100292831659317\n",
      "Q: tensor([0.0182, 0.0079, 0.0067, 0.0221, 0.0261, 0.0445, 0.0088, 0.0122, 0.0230,\n",
      "        0.0222, 0.0086, 0.0080, 0.0173, 0.0125, 0.0212, 0.0189, 0.0137, 0.0608,\n",
      "        0.0071, 0.0446, 0.0075, 0.0494, 0.0171, 0.0148, 0.0120, 0.0150, 0.0128,\n",
      "        0.0230, 0.0075, 0.0077, 0.0388, 0.0329, 0.0849, 0.0141, 0.0158, 0.0294,\n",
      "        0.0333, 0.0141, 0.0071, 0.0139, 0.0178, 0.0172, 0.0308, 0.0068, 0.0062,\n",
      "        0.0195, 0.0070, 0.0078, 0.0093, 0.0222], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 51: BSNE_Loss=0.1386,local_loss: 0.13863463518156063,global_loss: 0.35647493600845337\n",
      "Q: tensor([0.0076, 0.0108, 0.0556, 0.0106, 0.0069, 0.0224, 0.0093, 0.0060, 0.0084,\n",
      "        0.0434, 0.0110, 0.0177, 0.0179, 0.0040, 0.0200, 0.0385, 0.0197, 0.0070,\n",
      "        0.0189, 0.0192, 0.0163, 0.0191, 0.0098, 0.0035, 0.0147, 0.0190, 0.0057,\n",
      "        0.0311, 0.0097, 0.0079, 0.0329, 0.0916, 0.0321, 0.0083, 0.0455, 0.0161,\n",
      "        0.0171, 0.0286, 0.0245, 0.0092, 0.0610, 0.0204, 0.0259, 0.0189, 0.0029,\n",
      "        0.0215, 0.0129, 0.0112, 0.0240, 0.0037], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 52: BSNE_Loss=0.1948,local_loss: 0.1947925484990783,global_loss: 0.2333582639694214\n",
      "Q: tensor([0.0149, 0.0311, 0.0105, 0.0125, 0.0228, 0.0246, 0.0294, 0.0296, 0.0090,\n",
      "        0.0655, 0.0125, 0.0168, 0.0188, 0.0118, 0.0059, 0.0109, 0.0113, 0.0227,\n",
      "        0.0092, 0.0145, 0.0223, 0.0271, 0.0139, 0.0214, 0.0153, 0.0207, 0.0189,\n",
      "        0.0222, 0.0518, 0.0112, 0.0098, 0.0119, 0.0188, 0.0052, 0.0059, 0.0070,\n",
      "        0.0089, 0.0242, 0.0086, 0.0124, 0.0147, 0.0133, 0.0279, 0.0174, 0.0372,\n",
      "        0.0242, 0.0159, 0.0394, 0.0144, 0.0737], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 53: BSNE_Loss=0.0951,local_loss: 0.09511835883339606,global_loss: 0.30412885546684265\n",
      "Q: tensor([0.0124, 0.0129, 0.0116, 0.0119, 0.0101, 0.0065, 0.0965, 0.1675, 0.0095,\n",
      "        0.0224, 0.0087, 0.0375, 0.0064, 0.0028, 0.0109, 0.0260, 0.0223, 0.0055,\n",
      "        0.0029, 0.0288, 0.0158, 0.0122, 0.0067, 0.0138, 0.0078, 0.0056, 0.0091,\n",
      "        0.0092, 0.0110, 0.0136, 0.0466, 0.0245, 0.0114, 0.0089, 0.0076, 0.0153,\n",
      "        0.0198, 0.0190, 0.0537, 0.0290, 0.0185, 0.0150, 0.0154, 0.0176, 0.0171,\n",
      "        0.0192, 0.0108, 0.0096, 0.0146, 0.0085], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 54: BSNE_Loss=0.2732,local_loss: 0.2731996016829139,global_loss: 0.07926611602306366\n",
      "Q: tensor([0.0166, 0.0282, 0.0094, 0.0048, 0.0092, 0.0117, 0.0311, 0.0181, 0.0108,\n",
      "        0.0412, 0.0068, 0.0073, 0.0061, 0.0113, 0.0676, 0.0075, 0.0082, 0.0339,\n",
      "        0.0304, 0.0070, 0.0181, 0.0086, 0.0092, 0.0030, 0.0103, 0.0177, 0.0096,\n",
      "        0.0080, 0.0118, 0.0270, 0.0111, 0.0235, 0.0112, 0.0151, 0.0393, 0.0247,\n",
      "        0.0401, 0.0142, 0.0176, 0.0949, 0.0149, 0.0271, 0.0317, 0.0075, 0.0442,\n",
      "        0.0097, 0.0166, 0.0354, 0.0201, 0.0104], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 55: BSNE_Loss=0.1780,local_loss: 0.17798568229868095,global_loss: -0.005870433058589697\n",
      "Q: tensor([0.0120, 0.0087, 0.0065, 0.0141, 0.0217, 0.0350, 0.0233, 0.0148, 0.0173,\n",
      "        0.0093, 0.0212, 0.0104, 0.0050, 0.0102, 0.0320, 0.0307, 0.0180, 0.0147,\n",
      "        0.0468, 0.0309, 0.0115, 0.0071, 0.0067, 0.0599, 0.0079, 0.0033, 0.0213,\n",
      "        0.0217, 0.0325, 0.0036, 0.0135, 0.0195, 0.0349, 0.0213, 0.0093, 0.0083,\n",
      "        0.0204, 0.0117, 0.0111, 0.0106, 0.0383, 0.0122, 0.0395, 0.0166, 0.0396,\n",
      "        0.0182, 0.0076, 0.0560, 0.0144, 0.0390], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 56: BSNE_Loss=0.1385,local_loss: 0.13846745864156296,global_loss: 0.1247095838189125\n",
      "Q: tensor([0.0247, 0.0118, 0.0255, 0.0304, 0.0112, 0.0213, 0.0231, 0.0087, 0.0260,\n",
      "        0.0036, 0.0155, 0.0126, 0.0186, 0.0051, 0.0136, 0.0042, 0.0371, 0.0429,\n",
      "        0.0369, 0.0307, 0.0122, 0.0134, 0.0238, 0.0356, 0.0126, 0.0076, 0.0049,\n",
      "        0.0323, 0.0133, 0.0211, 0.0418, 0.0189, 0.0190, 0.0265, 0.0409, 0.0141,\n",
      "        0.0135, 0.0094, 0.0064, 0.0106, 0.0045, 0.0449, 0.0105, 0.0081, 0.0059,\n",
      "        0.0424, 0.0223, 0.0276, 0.0072, 0.0452], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 57: BSNE_Loss=0.1366,local_loss: 0.13657498132642115,global_loss: 0.23306408524513245\n",
      "Q: tensor([0.0386, 0.0087, 0.0137, 0.0166, 0.0282, 0.0363, 0.0320, 0.0214, 0.0164,\n",
      "        0.0137, 0.0135, 0.0156, 0.0210, 0.0140, 0.0112, 0.0163, 0.0116, 0.0173,\n",
      "        0.0080, 0.0699, 0.0045, 0.0144, 0.0327, 0.0164, 0.0145, 0.0041, 0.0182,\n",
      "        0.0297, 0.0402, 0.0273, 0.0055, 0.0081, 0.0177, 0.0118, 0.0145, 0.0118,\n",
      "        0.0173, 0.0089, 0.0168, 0.0103, 0.0156, 0.0169, 0.0437, 0.0117, 0.0412,\n",
      "        0.0330, 0.0138, 0.0141, 0.0300, 0.0313], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 58: BSNE_Loss=0.0840,local_loss: 0.08398363978196877,global_loss: 0.026837218552827835\n",
      "Q: tensor([0.0115, 0.0660, 0.0121, 0.0231, 0.0273, 0.0305, 0.0275, 0.0619, 0.0113,\n",
      "        0.0026, 0.0161, 0.0060, 0.0497, 0.0261, 0.0244, 0.0361, 0.0128, 0.0404,\n",
      "        0.0076, 0.0361, 0.0151, 0.0114, 0.0203, 0.0093, 0.0102, 0.0143, 0.0104,\n",
      "        0.0077, 0.0541, 0.0066, 0.0117, 0.0081, 0.0083, 0.0057, 0.0067, 0.0290,\n",
      "        0.0218, 0.0472, 0.0062, 0.0048, 0.0384, 0.0028, 0.0117, 0.0140, 0.0182,\n",
      "        0.0179, 0.0109, 0.0198, 0.0204, 0.0079], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 59: BSNE_Loss=0.1958,local_loss: 0.1957552523462939,global_loss: 0.17386899888515472\n",
      "Q: tensor([0.0062, 0.0088, 0.0341, 0.0071, 0.1317, 0.0095, 0.0170, 0.0149, 0.0056,\n",
      "        0.0195, 0.0338, 0.0215, 0.0157, 0.0276, 0.0059, 0.0070, 0.0192, 0.0038,\n",
      "        0.0054, 0.0211, 0.0136, 0.0181, 0.0344, 0.0267, 0.0326, 0.0108, 0.0039,\n",
      "        0.0244, 0.0161, 0.0107, 0.0469, 0.0268, 0.0224, 0.0138, 0.0400, 0.0205,\n",
      "        0.0107, 0.0199, 0.0194, 0.0036, 0.0191, 0.0040, 0.0099, 0.0112, 0.0095,\n",
      "        0.0110, 0.0208, 0.0184, 0.0595, 0.0056], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 60: BSNE_Loss=0.2165,local_loss: 0.21649284228676458,global_loss: 0.5480138063430786\n",
      "Q: tensor([0.0694, 0.0184, 0.0137, 0.0080, 0.0147, 0.0360, 0.0089, 0.0066, 0.0121,\n",
      "        0.0272, 0.0071, 0.0084, 0.0087, 0.0092, 0.0090, 0.0115, 0.0078, 0.0546,\n",
      "        0.0505, 0.0108, 0.0545, 0.0091, 0.0061, 0.0083, 0.0188, 0.0210, 0.0052,\n",
      "        0.0221, 0.0169, 0.0303, 0.0081, 0.0121, 0.0151, 0.0123, 0.0231, 0.0738,\n",
      "        0.0330, 0.0126, 0.0300, 0.0107, 0.0152, 0.0686, 0.0052, 0.0093, 0.0045,\n",
      "        0.0138, 0.0153, 0.0232, 0.0100, 0.0193], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 61: BSNE_Loss=0.1991,local_loss: 0.19912803892148517,global_loss: -0.010669623501598835\n",
      "Q: tensor([0.0211, 0.0289, 0.0933, 0.0142, 0.0119, 0.0119, 0.0203, 0.0211, 0.0282,\n",
      "        0.0225, 0.0220, 0.0137, 0.0157, 0.0141, 0.0120, 0.0088, 0.0132, 0.0099,\n",
      "        0.0235, 0.0051, 0.0100, 0.0151, 0.0031, 0.0127, 0.0217, 0.0578, 0.0158,\n",
      "        0.0300, 0.0047, 0.0105, 0.0097, 0.0075, 0.0287, 0.0174, 0.0052, 0.0182,\n",
      "        0.0154, 0.0137, 0.0132, 0.0088, 0.0186, 0.0358, 0.0456, 0.0045, 0.0211,\n",
      "        0.0273, 0.0191, 0.0582, 0.0246, 0.0145], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 62: BSNE_Loss=0.1402,local_loss: 0.14023566617039332,global_loss: 0.1731688678264618\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Q: tensor([0.0194, 0.0193, 0.0222, 0.0034, 0.0028, 0.0079, 0.0140, 0.0098, 0.0082,\n",
      "        0.0104, 0.0043, 0.0109, 0.0431, 0.0207, 0.0073, 0.0332, 0.0346, 0.0410,\n",
      "        0.0130, 0.0131, 0.0081, 0.0043, 0.0164, 0.0091, 0.0121, 0.0120, 0.0242,\n",
      "        0.0217, 0.0337, 0.0134, 0.0133, 0.0247, 0.0178, 0.0505, 0.0135, 0.0150,\n",
      "        0.0164, 0.0312, 0.0119, 0.0285, 0.0138, 0.0040, 0.0128, 0.0129, 0.0256,\n",
      "        0.0084, 0.0129, 0.0266, 0.0061, 0.1607], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 63: BSNE_Loss=0.2265,local_loss: 0.22648258920070524,global_loss: 0.08120430260896683\n",
      "Q: tensor([0.0037, 0.0175, 0.0105, 0.0050, 0.0128, 0.0253, 0.0239, 0.0029, 0.0206,\n",
      "        0.0125, 0.0036, 0.0889, 0.0181, 0.0113, 0.0177, 0.0087, 0.0038, 0.0458,\n",
      "        0.0125, 0.0335, 0.0187, 0.0347, 0.0093, 0.0100, 0.0160, 0.0157, 0.0346,\n",
      "        0.0493, 0.0060, 0.0204, 0.0081, 0.0177, 0.0159, 0.0140, 0.0045, 0.0169,\n",
      "        0.0275, 0.0126, 0.0169, 0.1267, 0.0061, 0.0122, 0.0039, 0.0092, 0.0165,\n",
      "        0.0095, 0.0092, 0.0498, 0.0186, 0.0107], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 64: BSNE_Loss=0.2645,local_loss: 0.2645299318880351,global_loss: 0.04189392179250717\n",
      "Q: tensor([0.0643, 0.0068, 0.0281, 0.0181, 0.0098, 0.0135, 0.0157, 0.0147, 0.0331,\n",
      "        0.0339, 0.0170, 0.0056, 0.0283, 0.0218, 0.0063, 0.0157, 0.0463, 0.0111,\n",
      "        0.0148, 0.0270, 0.0902, 0.0130, 0.0080, 0.0114, 0.0186, 0.0083, 0.0174,\n",
      "        0.0222, 0.0202, 0.0045, 0.0114, 0.0122, 0.0105, 0.0125, 0.0105, 0.0109,\n",
      "        0.0223, 0.0074, 0.0227, 0.0093, 0.0321, 0.0044, 0.0252, 0.0262, 0.0317,\n",
      "        0.0299, 0.0067, 0.0167, 0.0261, 0.0259], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 65: BSNE_Loss=0.1316,local_loss: 0.13157782304185958,global_loss: 0.33186575770378113\n",
      "Q: tensor([0.0178, 0.0104, 0.0108, 0.0173, 0.0142, 0.0088, 0.0221, 0.0167, 0.0287,\n",
      "        0.0962, 0.0540, 0.0209, 0.0094, 0.0614, 0.0103, 0.0118, 0.0064, 0.0223,\n",
      "        0.0123, 0.0037, 0.0234, 0.0035, 0.0207, 0.0230, 0.0075, 0.0189, 0.0474,\n",
      "        0.0338, 0.0073, 0.0086, 0.0087, 0.0337, 0.0354, 0.0157, 0.0069, 0.0122,\n",
      "        0.0044, 0.0091, 0.0131, 0.0063, 0.0433, 0.0091, 0.0196, 0.0100, 0.0752,\n",
      "        0.0038, 0.0046, 0.0148, 0.0042, 0.0203], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 66: BSNE_Loss=0.2478,local_loss: 0.24775601690888344,global_loss: -0.20554077625274658\n",
      "Q: tensor([0.0295, 0.0208, 0.0128, 0.0093, 0.0097, 0.0271, 0.0417, 0.0057, 0.0594,\n",
      "        0.0075, 0.0125, 0.0319, 0.0108, 0.0218, 0.0196, 0.0178, 0.0083, 0.0103,\n",
      "        0.0201, 0.0050, 0.0236, 0.0095, 0.0371, 0.0119, 0.0127, 0.0258, 0.0144,\n",
      "        0.0126, 0.0117, 0.0090, 0.0117, 0.0141, 0.0113, 0.0388, 0.0321, 0.0630,\n",
      "        0.0170, 0.0222, 0.0045, 0.0074, 0.0101, 0.0215, 0.0159, 0.0942, 0.0119,\n",
      "        0.0070, 0.0168, 0.0171, 0.0046, 0.0289], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 67: BSNE_Loss=0.1578,local_loss: 0.15778831856725725,global_loss: 0.6595302224159241\n",
      "Q: tensor([0.0146, 0.0372, 0.0496, 0.0172, 0.0185, 0.0084, 0.0076, 0.0034, 0.0104,\n",
      "        0.0134, 0.0157, 0.0176, 0.0203, 0.0136, 0.0300, 0.0154, 0.0129, 0.0242,\n",
      "        0.0250, 0.0121, 0.0083, 0.0220, 0.0346, 0.0291, 0.0358, 0.0136, 0.0128,\n",
      "        0.0069, 0.0124, 0.0209, 0.0534, 0.0102, 0.0122, 0.0185, 0.0300, 0.0230,\n",
      "        0.0407, 0.0164, 0.0076, 0.0211, 0.0166, 0.0107, 0.0114, 0.0096, 0.0408,\n",
      "        0.0254, 0.0206, 0.0400, 0.0221, 0.0059], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 68: BSNE_Loss=0.0786,local_loss: 0.07856853854181352,global_loss: 0.2829928994178772\n",
      "Q: tensor([0.0096, 0.0255, 0.0233, 0.0049, 0.0304, 0.0191, 0.0289, 0.0051, 0.0720,\n",
      "        0.0109, 0.0423, 0.0114, 0.0130, 0.0045, 0.0159, 0.0289, 0.0049, 0.0228,\n",
      "        0.0167, 0.0871, 0.0068, 0.0031, 0.0553, 0.0065, 0.0166, 0.0169, 0.0080,\n",
      "        0.0112, 0.0085, 0.0229, 0.0197, 0.0124, 0.0220, 0.0032, 0.0178, 0.0055,\n",
      "        0.0225, 0.0123, 0.0126, 0.0189, 0.0384, 0.0137, 0.0145, 0.0261, 0.0426,\n",
      "        0.0215, 0.0053, 0.0217, 0.0180, 0.0186], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 69: BSNE_Loss=0.1941,local_loss: 0.194141701831364,global_loss: -0.010026243515312672\n",
      "Q: tensor([0.0200, 0.0140, 0.0138, 0.0116, 0.0320, 0.0166, 0.0077, 0.0239, 0.0113,\n",
      "        0.0420, 0.0124, 0.0089, 0.0194, 0.0121, 0.0155, 0.0156, 0.0469, 0.0176,\n",
      "        0.0131, 0.0051, 0.0168, 0.0083, 0.0070, 0.0073, 0.0461, 0.0259, 0.0039,\n",
      "        0.0355, 0.0158, 0.0219, 0.0342, 0.0209, 0.0212, 0.0046, 0.1055, 0.0165,\n",
      "        0.0186, 0.0286, 0.0158, 0.0362, 0.0118, 0.0295, 0.0111, 0.0087, 0.0087,\n",
      "        0.0058, 0.0273, 0.0344, 0.0084, 0.0041], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 70: BSNE_Loss=0.1600,local_loss: 0.15999165185088887,global_loss: 0.260735422372818\n",
      "Q: tensor([0.0323, 0.0079, 0.0158, 0.0098, 0.0301, 0.0282, 0.0256, 0.0067, 0.0047,\n",
      "        0.0080, 0.0099, 0.0069, 0.0126, 0.0088, 0.0078, 0.0109, 0.0239, 0.0290,\n",
      "        0.0192, 0.0526, 0.0024, 0.0803, 0.0073, 0.0433, 0.0106, 0.0417, 0.0388,\n",
      "        0.0225, 0.0238, 0.0265, 0.0153, 0.0065, 0.0129, 0.0086, 0.0270, 0.0316,\n",
      "        0.0088, 0.0372, 0.0133, 0.0030, 0.0111, 0.0621, 0.0091, 0.0047, 0.0336,\n",
      "        0.0217, 0.0038, 0.0250, 0.0108, 0.0061], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 71: BSNE_Loss=0.2246,local_loss: 0.22461356102459348,global_loss: -0.033713966608047485\n",
      "Q: tensor([0.0334, 0.0333, 0.0149, 0.0112, 0.0055, 0.0149, 0.0206, 0.0078, 0.0235,\n",
      "        0.0159, 0.0220, 0.0050, 0.0227, 0.0187, 0.0119, 0.0041, 0.1390, 0.0167,\n",
      "        0.0140, 0.0130, 0.0173, 0.0165, 0.0113, 0.0231, 0.0195, 0.0469, 0.0096,\n",
      "        0.0089, 0.0141, 0.0081, 0.0083, 0.0280, 0.0168, 0.0130, 0.0127, 0.0203,\n",
      "        0.0181, 0.0277, 0.0105, 0.0173, 0.0321, 0.0184, 0.0285, 0.0106, 0.0225,\n",
      "        0.0087, 0.0111, 0.0140, 0.0356, 0.0225], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 72: BSNE_Loss=0.1276,local_loss: 0.12760405015772644,global_loss: 0.36638399958610535\n",
      "Q: tensor([0.0129, 0.0155, 0.0333, 0.0139, 0.0151, 0.0170, 0.0050, 0.0773, 0.0174,\n",
      "        0.0105, 0.0129, 0.0261, 0.0185, 0.0123, 0.0376, 0.0090, 0.0225, 0.0119,\n",
      "        0.0101, 0.0157, 0.0490, 0.0235, 0.0281, 0.0328, 0.0326, 0.0230, 0.0224,\n",
      "        0.0097, 0.0117, 0.0149, 0.0039, 0.0189, 0.0238, 0.0120, 0.0200, 0.0300,\n",
      "        0.0448, 0.0164, 0.0080, 0.0116, 0.0130, 0.0142, 0.0283, 0.0177, 0.0108,\n",
      "        0.0029, 0.0162, 0.0213, 0.0106, 0.0332], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 73: BSNE_Loss=0.0925,local_loss: 0.0925301398885139,global_loss: 0.11829385906457901\n",
      "Q: tensor([0.0152, 0.0333, 0.0476, 0.0104, 0.0081, 0.0393, 0.0170, 0.0169, 0.0211,\n",
      "        0.0075, 0.0097, 0.0633, 0.0347, 0.0283, 0.0221, 0.0070, 0.0188, 0.0064,\n",
      "        0.0060, 0.0242, 0.0165, 0.0086, 0.0291, 0.0186, 0.0184, 0.0131, 0.0343,\n",
      "        0.0357, 0.0072, 0.0064, 0.0099, 0.0574, 0.0100, 0.0044, 0.0176, 0.0093,\n",
      "        0.0387, 0.0181, 0.0181, 0.0086, 0.0409, 0.0195, 0.0223, 0.0081, 0.0127,\n",
      "        0.0100, 0.0144, 0.0216, 0.0123, 0.0212], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 74: BSNE_Loss=0.1218,local_loss: 0.1217569702407431,global_loss: 0.3965575695037842\n",
      "Q: tensor([0.0181, 0.0266, 0.0115, 0.0879, 0.0396, 0.0569, 0.0080, 0.0260, 0.0126,\n",
      "        0.0168, 0.0496, 0.0146, 0.0119, 0.0056, 0.0199, 0.0174, 0.0257, 0.0078,\n",
      "        0.0063, 0.0065, 0.0063, 0.0329, 0.0267, 0.0027, 0.0144, 0.0058, 0.0231,\n",
      "        0.0186, 0.0052, 0.0214, 0.0196, 0.0347, 0.0113, 0.0074, 0.0096, 0.0225,\n",
      "        0.0146, 0.0036, 0.0479, 0.0048, 0.0302, 0.0154, 0.0139, 0.0167, 0.0298,\n",
      "        0.0414, 0.0018, 0.0086, 0.0117, 0.0279], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 75: BSNE_Loss=0.2139,local_loss: 0.21394772101546905,global_loss: 0.4847569167613983\n",
      "Q: tensor([0.0207, 0.0153, 0.0292, 0.0610, 0.0229, 0.0240, 0.0126, 0.0094, 0.0130,\n",
      "        0.0163, 0.0056, 0.0106, 0.0239, 0.0141, 0.0067, 0.0195, 0.0354, 0.0805,\n",
      "        0.0397, 0.0421, 0.0198, 0.0113, 0.0437, 0.0069, 0.0055, 0.0186, 0.0230,\n",
      "        0.0219, 0.0080, 0.0068, 0.0037, 0.0561, 0.0042, 0.0101, 0.0173, 0.0130,\n",
      "        0.0052, 0.0089, 0.0113, 0.0126, 0.0074, 0.0097, 0.0386, 0.0065, 0.0125,\n",
      "        0.0091, 0.0066, 0.0372, 0.0191, 0.0430], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 76: BSNE_Loss=0.1966,local_loss: 0.19660566514832709,global_loss: -0.0036888127215206623\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Q: tensor([0.0604, 0.0130, 0.0147, 0.0086, 0.0147, 0.0187, 0.0102, 0.0159, 0.0139,\n",
      "        0.0048, 0.0088, 0.0539, 0.0230, 0.0069, 0.0129, 0.0165, 0.0132, 0.0086,\n",
      "        0.0175, 0.0155, 0.0406, 0.0949, 0.0287, 0.0075, 0.0165, 0.0212, 0.0084,\n",
      "        0.0102, 0.0384, 0.0156, 0.0258, 0.0198, 0.0071, 0.0211, 0.0119, 0.0323,\n",
      "        0.0190, 0.0087, 0.0221, 0.0033, 0.0318, 0.0069, 0.0052, 0.0106, 0.0576,\n",
      "        0.0351, 0.0059, 0.0031, 0.0140, 0.0250], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 77: BSNE_Loss=0.1915,local_loss: 0.19147391076772147,global_loss: -0.12688881158828735\n",
      "Q: tensor([0.0585, 0.0198, 0.0038, 0.0102, 0.0166, 0.0071, 0.0284, 0.0051, 0.0055,\n",
      "        0.0435, 0.0258, 0.0057, 0.0097, 0.0328, 0.0213, 0.0093, 0.0233, 0.0153,\n",
      "        0.0123, 0.0634, 0.0254, 0.0055, 0.0045, 0.0135, 0.0129, 0.0684, 0.0114,\n",
      "        0.0168, 0.0175, 0.0094, 0.0120, 0.0125, 0.0151, 0.0164, 0.0140, 0.0146,\n",
      "        0.0389, 0.0030, 0.0282, 0.0351, 0.0164, 0.0041, 0.0414, 0.0135, 0.0099,\n",
      "        0.0613, 0.0188, 0.0221, 0.0136, 0.0064], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 78: BSNE_Loss=0.2000,local_loss: 0.19998013577929563,global_loss: 0.22422586381435394\n",
      "Q: tensor([0.0286, 0.0382, 0.0037, 0.0056, 0.0346, 0.0292, 0.0485, 0.0251, 0.0237,\n",
      "        0.0128, 0.0189, 0.0147, 0.0131, 0.0258, 0.0197, 0.0250, 0.0024, 0.0363,\n",
      "        0.0613, 0.0115, 0.0220, 0.0080, 0.0059, 0.0735, 0.0143, 0.0181, 0.0021,\n",
      "        0.0244, 0.0168, 0.0188, 0.0184, 0.0141, 0.0148, 0.0102, 0.0097, 0.0188,\n",
      "        0.0060, 0.0524, 0.0202, 0.0136, 0.0244, 0.0176, 0.0056, 0.0047, 0.0167,\n",
      "        0.0395, 0.0039, 0.0109, 0.0086, 0.0072], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 79: BSNE_Loss=0.1973,local_loss: 0.19734534340104973,global_loss: 0.311486154794693\n",
      "Q: tensor([0.0178, 0.0113, 0.0319, 0.0380, 0.0068, 0.0121, 0.0266, 0.0354, 0.0271,\n",
      "        0.0178, 0.0053, 0.0470, 0.0136, 0.0489, 0.0541, 0.0044, 0.0153, 0.0694,\n",
      "        0.0164, 0.0062, 0.0126, 0.0070, 0.0179, 0.0271, 0.0060, 0.0064, 0.0202,\n",
      "        0.0149, 0.0197, 0.0147, 0.0236, 0.0163, 0.0140, 0.0101, 0.0221, 0.0095,\n",
      "        0.0110, 0.0382, 0.0105, 0.0060, 0.0195, 0.0522, 0.0082, 0.0082, 0.0078,\n",
      "        0.0059, 0.0171, 0.0139, 0.0430, 0.0111], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 80: BSNE_Loss=0.1597,local_loss: 0.15972813045255935,global_loss: 0.11763149499893188\n",
      "Q: tensor([0.0117, 0.0236, 0.0139, 0.0141, 0.0273, 0.0086, 0.0073, 0.0160, 0.0112,\n",
      "        0.0165, 0.0137, 0.0330, 0.0217, 0.0189, 0.0212, 0.0166, 0.0220, 0.0127,\n",
      "        0.0322, 0.0124, 0.0339, 0.0106, 0.0240, 0.0183, 0.0289, 0.0607, 0.0137,\n",
      "        0.0112, 0.0129, 0.0187, 0.0402, 0.0099, 0.0143, 0.0117, 0.0271, 0.0034,\n",
      "        0.0076, 0.0082, 0.0792, 0.0054, 0.0289, 0.0072, 0.0213, 0.0095, 0.0128,\n",
      "        0.0142, 0.0129, 0.0110, 0.0516, 0.0361], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 81: BSNE_Loss=0.1115,local_loss: 0.11149280734107782,global_loss: 0.4611430764198303\n",
      "Q: tensor([0.0124, 0.0178, 0.0108, 0.0142, 0.0167, 0.0233, 0.0286, 0.0053, 0.0101,\n",
      "        0.0235, 0.0192, 0.0125, 0.0348, 0.0087, 0.0144, 0.0101, 0.0056, 0.0102,\n",
      "        0.0051, 0.0725, 0.0154, 0.0531, 0.0050, 0.0137, 0.0070, 0.0198, 0.0087,\n",
      "        0.0160, 0.0096, 0.0317, 0.0305, 0.0345, 0.0145, 0.0156, 0.0284, 0.0551,\n",
      "        0.0085, 0.0092, 0.0832, 0.0130, 0.0119, 0.0295, 0.0236, 0.0056, 0.0048,\n",
      "        0.0352, 0.0062, 0.0110, 0.0385, 0.0054], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 82: BSNE_Loss=0.1917,local_loss: 0.19171595740157388,global_loss: 0.3628538250923157\n",
      "Q: tensor([0.0200, 0.0247, 0.0098, 0.0223, 0.0254, 0.0319, 0.0339, 0.0072, 0.0264,\n",
      "        0.0166, 0.0171, 0.0128, 0.0392, 0.0192, 0.0215, 0.0221, 0.0396, 0.0508,\n",
      "        0.0189, 0.0263, 0.0117, 0.0269, 0.0206, 0.0132, 0.0091, 0.0112, 0.0072,\n",
      "        0.0504, 0.0256, 0.0104, 0.0323, 0.0065, 0.0061, 0.0322, 0.0127, 0.0159,\n",
      "        0.0176, 0.0193, 0.0150, 0.0043, 0.0041, 0.0098, 0.0097, 0.0186, 0.0320,\n",
      "        0.0196, 0.0192, 0.0297, 0.0184, 0.0047], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 83: BSNE_Loss=0.0844,local_loss: 0.08442904003271748,global_loss: 0.32129985094070435\n",
      "Q: tensor([0.0071, 0.0065, 0.0125, 0.0165, 0.0065, 0.0344, 0.0613, 0.0239, 0.0328,\n",
      "        0.0046, 0.0167, 0.0377, 0.0096, 0.0951, 0.0155, 0.0085, 0.0122, 0.0385,\n",
      "        0.0066, 0.0279, 0.0116, 0.0381, 0.0345, 0.0218, 0.0063, 0.0111, 0.0096,\n",
      "        0.0093, 0.0365, 0.0019, 0.0112, 0.0152, 0.0175, 0.0120, 0.0111, 0.0552,\n",
      "        0.0092, 0.0039, 0.0046, 0.0243, 0.0223, 0.0226, 0.0267, 0.0105, 0.0101,\n",
      "        0.0120, 0.0110, 0.0407, 0.0163, 0.0084], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 84: BSNE_Loss=0.2137,local_loss: 0.2137268808646001,global_loss: 0.2989724576473236\n",
      "Q: tensor([0.0093, 0.0123, 0.0171, 0.0113, 0.0184, 0.0080, 0.0143, 0.0218, 0.0281,\n",
      "        0.0119, 0.0088, 0.0110, 0.0050, 0.0031, 0.0074, 0.0103, 0.0318, 0.0132,\n",
      "        0.0354, 0.0630, 0.0181, 0.0808, 0.0215, 0.0179, 0.0040, 0.0450, 0.0071,\n",
      "        0.0487, 0.0167, 0.0390, 0.0115, 0.0235, 0.0091, 0.0319, 0.0092, 0.0316,\n",
      "        0.0165, 0.0223, 0.0151, 0.0172, 0.0119, 0.0055, 0.0151, 0.0268, 0.0045,\n",
      "        0.0182, 0.0344, 0.0172, 0.0224, 0.0157], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 85: BSNE_Loss=0.1536,local_loss: 0.15360952203497613,global_loss: 0.06830288469791412\n",
      "Q: tensor([0.0077, 0.0223, 0.0135, 0.0178, 0.0090, 0.0058, 0.0315, 0.0098, 0.0547,\n",
      "        0.0136, 0.0088, 0.0140, 0.0186, 0.0140, 0.0153, 0.0085, 0.0733, 0.0235,\n",
      "        0.0135, 0.0106, 0.0506, 0.0173, 0.0086, 0.0174, 0.0136, 0.0044, 0.0944,\n",
      "        0.0159, 0.0287, 0.0050, 0.0071, 0.0297, 0.0088, 0.0322, 0.0193, 0.0069,\n",
      "        0.0085, 0.0088, 0.0508, 0.0201, 0.0407, 0.0227, 0.0273, 0.0071, 0.0191,\n",
      "        0.0052, 0.0076, 0.0205, 0.0078, 0.0082], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 86: BSNE_Loss=0.1975,local_loss: 0.19745705856016266,global_loss: 0.05770863965153694\n",
      "Q: tensor([0.0080, 0.0093, 0.0240, 0.0380, 0.0102, 0.0120, 0.0086, 0.0146, 0.0190,\n",
      "        0.0117, 0.0070, 0.0187, 0.0140, 0.0093, 0.0199, 0.0055, 0.0680, 0.0108,\n",
      "        0.0077, 0.0264, 0.0293, 0.0349, 0.0170, 0.0060, 0.0266, 0.0143, 0.0357,\n",
      "        0.0198, 0.0235, 0.0275, 0.0150, 0.0131, 0.0067, 0.0431, 0.0159, 0.0682,\n",
      "        0.0395, 0.0157, 0.0086, 0.0059, 0.0133, 0.0238, 0.0550, 0.0122, 0.0178,\n",
      "        0.0086, 0.0054, 0.0258, 0.0080, 0.0209], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 87: BSNE_Loss=0.1384,local_loss: 0.13840161858623914,global_loss: 0.3109758198261261\n",
      "Q: tensor([0.0251, 0.0393, 0.0124, 0.0105, 0.0387, 0.0506, 0.0282, 0.0113, 0.0146,\n",
      "        0.0168, 0.0074, 0.0308, 0.0055, 0.0066, 0.0044, 0.0405, 0.0072, 0.0062,\n",
      "        0.0047, 0.0167, 0.0079, 0.0145, 0.0229, 0.0196, 0.0062, 0.0863, 0.0150,\n",
      "        0.0031, 0.0156, 0.0184, 0.0069, 0.0195, 0.0151, 0.0115, 0.0131, 0.0326,\n",
      "        0.0128, 0.0202, 0.0080, 0.0228, 0.0311, 0.0264, 0.0171, 0.0126, 0.0388,\n",
      "        0.0231, 0.0397, 0.0145, 0.0257, 0.0218], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 88: BSNE_Loss=0.1524,local_loss: 0.15235819263568712,global_loss: 0.20249837636947632\n",
      "Q: tensor([0.0066, 0.0307, 0.0166, 0.0588, 0.0348, 0.0061, 0.0632, 0.0573, 0.0182,\n",
      "        0.0069, 0.0098, 0.0151, 0.0052, 0.0372, 0.0171, 0.0069, 0.0016, 0.0281,\n",
      "        0.0038, 0.0508, 0.0338, 0.0157, 0.0096, 0.0104, 0.0325, 0.0098, 0.0315,\n",
      "        0.0050, 0.0073, 0.0087, 0.0371, 0.0056, 0.0592, 0.0264, 0.0194, 0.0086,\n",
      "        0.0141, 0.0136, 0.0129, 0.0184, 0.0196, 0.0028, 0.0309, 0.0060, 0.0053,\n",
      "        0.0171, 0.0156, 0.0142, 0.0185, 0.0158], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 89: BSNE_Loss=0.2363,local_loss: 0.23628925792423872,global_loss: 0.010233786888420582\n",
      "Q: tensor([0.0878, 0.0036, 0.0462, 0.0687, 0.0189, 0.0082, 0.0178, 0.0118, 0.0043,\n",
      "        0.0018, 0.0084, 0.0435, 0.0533, 0.0735, 0.0043, 0.0111, 0.0157, 0.0133,\n",
      "        0.0177, 0.0132, 0.0092, 0.0150, 0.0127, 0.0062, 0.0189, 0.0085, 0.0050,\n",
      "        0.0591, 0.0475, 0.0162, 0.0102, 0.0339, 0.0126, 0.0123, 0.0215, 0.0107,\n",
      "        0.0123, 0.0366, 0.0048, 0.0091, 0.0125, 0.0123, 0.0133, 0.0038, 0.0145,\n",
      "        0.0056, 0.0083, 0.0186, 0.0066, 0.0185], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 90: BSNE_Loss=0.2877,local_loss: 0.28767491241850635,global_loss: 0.005379130598157644\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Q: tensor([0.0381, 0.0336, 0.0046, 0.0306, 0.0052, 0.0229, 0.0222, 0.0365, 0.0028,\n",
      "        0.0167, 0.0079, 0.0399, 0.0229, 0.0348, 0.0042, 0.0202, 0.0128, 0.0310,\n",
      "        0.0074, 0.0119, 0.0261, 0.0159, 0.0365, 0.0049, 0.0080, 0.0322, 0.0117,\n",
      "        0.0132, 0.0109, 0.0060, 0.0397, 0.0304, 0.0166, 0.0085, 0.0119, 0.0895,\n",
      "        0.0436, 0.0163, 0.0241, 0.0088, 0.0138, 0.0103, 0.0084, 0.0112, 0.0244,\n",
      "        0.0140, 0.0151, 0.0185, 0.0125, 0.0106], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 91: BSNE_Loss=0.1614,local_loss: 0.16137032532848672,global_loss: 0.05835600942373276\n",
      "Q: tensor([0.0134, 0.0176, 0.0516, 0.0119, 0.0169, 0.0244, 0.0086, 0.0054, 0.0040,\n",
      "        0.0363, 0.0074, 0.0143, 0.0097, 0.0105, 0.0109, 0.0176, 0.0073, 0.0057,\n",
      "        0.0112, 0.1008, 0.0126, 0.0225, 0.0225, 0.0251, 0.0096, 0.0165, 0.0105,\n",
      "        0.0106, 0.0210, 0.0083, 0.0058, 0.0460, 0.0410, 0.0111, 0.0387, 0.0151,\n",
      "        0.0142, 0.0204, 0.0156, 0.0238, 0.0532, 0.0145, 0.0080, 0.0108, 0.0061,\n",
      "        0.0567, 0.0153, 0.0172, 0.0103, 0.0315], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 92: BSNE_Loss=0.1745,local_loss: 0.17454442010627388,global_loss: 0.2738616466522217\n",
      "Q: tensor([0.0122, 0.0181, 0.0152, 0.0168, 0.1341, 0.0106, 0.0381, 0.0081, 0.0130,\n",
      "        0.0048, 0.0565, 0.0092, 0.0152, 0.0056, 0.0136, 0.0054, 0.0074, 0.0224,\n",
      "        0.0171, 0.0324, 0.0050, 0.0257, 0.0078, 0.0220, 0.0850, 0.0186, 0.0088,\n",
      "        0.0276, 0.0105, 0.0076, 0.0232, 0.0018, 0.0304, 0.0142, 0.0278, 0.0212,\n",
      "        0.0154, 0.0197, 0.0118, 0.0183, 0.0209, 0.0116, 0.0130, 0.0073, 0.0256,\n",
      "        0.0144, 0.0095, 0.0182, 0.0088, 0.0127], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 93: BSNE_Loss=0.2176,local_loss: 0.21757011990772848,global_loss: 0.28416383266448975\n",
      "Q: tensor([0.0056, 0.0518, 0.0161, 0.0079, 0.0798, 0.0199, 0.0104, 0.0076, 0.0278,\n",
      "        0.0161, 0.0031, 0.0069, 0.0155, 0.0165, 0.0038, 0.0124, 0.0040, 0.0613,\n",
      "        0.0154, 0.0211, 0.0107, 0.0461, 0.0102, 0.0299, 0.0038, 0.0116, 0.0313,\n",
      "        0.0046, 0.0145, 0.0083, 0.0490, 0.0229, 0.0162, 0.0124, 0.0131, 0.0271,\n",
      "        0.0273, 0.0197, 0.0117, 0.0125, 0.0108, 0.0422, 0.0393, 0.0043, 0.0146,\n",
      "        0.0380, 0.0090, 0.0142, 0.0314, 0.0104], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 94: BSNE_Loss=0.2087,local_loss: 0.20871986433223594,global_loss: 0.03259526938199997\n",
      "Q: tensor([0.0251, 0.0201, 0.0134, 0.0047, 0.0097, 0.0263, 0.0218, 0.0398, 0.0214,\n",
      "        0.0158, 0.0036, 0.0145, 0.0291, 0.0278, 0.0103, 0.0106, 0.0409, 0.0258,\n",
      "        0.0104, 0.0289, 0.0159, 0.0117, 0.0318, 0.0265, 0.0188, 0.0161, 0.0200,\n",
      "        0.0113, 0.0256, 0.0060, 0.0328, 0.0162, 0.0724, 0.0069, 0.0203, 0.0128,\n",
      "        0.0304, 0.0496, 0.0154, 0.0234, 0.0169, 0.0047, 0.0137, 0.0041, 0.0057,\n",
      "        0.0092, 0.0092, 0.0326, 0.0213, 0.0185], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 95: BSNE_Loss=0.1137,local_loss: 0.1137480476710255,global_loss: 0.2332543283700943\n",
      "Q: tensor([0.0168, 0.0172, 0.0846, 0.0337, 0.0089, 0.0106, 0.0198, 0.0098, 0.0192,\n",
      "        0.0237, 0.0200, 0.0141, 0.0031, 0.0148, 0.0040, 0.0161, 0.0123, 0.0124,\n",
      "        0.0092, 0.0115, 0.0681, 0.0521, 0.0069, 0.0108, 0.0065, 0.0081, 0.0062,\n",
      "        0.0072, 0.0155, 0.0105, 0.0115, 0.0272, 0.0214, 0.0105, 0.0466, 0.0262,\n",
      "        0.0351, 0.0579, 0.0114, 0.0281, 0.0070, 0.0162, 0.0230, 0.0462, 0.0198,\n",
      "        0.0146, 0.0146, 0.0195, 0.0038, 0.0056], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 96: BSNE_Loss=0.1934,local_loss: 0.19338849334313454,global_loss: -0.011129029095172882\n",
      "Q: tensor([0.0138, 0.0169, 0.0537, 0.0410, 0.0255, 0.0070, 0.0080, 0.0105, 0.0614,\n",
      "        0.0086, 0.0079, 0.0066, 0.0053, 0.0085, 0.0090, 0.0064, 0.0188, 0.0044,\n",
      "        0.0188, 0.0119, 0.0089, 0.0323, 0.0220, 0.0066, 0.0285, 0.0140, 0.0298,\n",
      "        0.0161, 0.0074, 0.0074, 0.0092, 0.0130, 0.0194, 0.0106, 0.0060, 0.0576,\n",
      "        0.0541, 0.0128, 0.0124, 0.0507, 0.0157, 0.0097, 0.0320, 0.0116, 0.0269,\n",
      "        0.0030, 0.0440, 0.0249, 0.0095, 0.0596], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 97: BSNE_Loss=0.2104,local_loss: 0.2103951310590794,global_loss: 0.22775858640670776\n",
      "Q: tensor([0.0147, 0.0369, 0.0086, 0.0052, 0.0259, 0.0598, 0.0188, 0.0304, 0.0264,\n",
      "        0.0151, 0.0097, 0.0114, 0.0049, 0.0347, 0.0101, 0.0185, 0.0120, 0.0059,\n",
      "        0.0269, 0.0113, 0.0210, 0.0112, 0.0654, 0.0121, 0.0364, 0.0152, 0.0093,\n",
      "        0.0046, 0.0274, 0.0196, 0.0201, 0.0141, 0.0207, 0.0292, 0.0137, 0.0115,\n",
      "        0.0105, 0.0315, 0.0148, 0.0170, 0.0071, 0.0398, 0.0236, 0.0137, 0.0289,\n",
      "        0.0101, 0.0079, 0.0273, 0.0393, 0.0099], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 98: BSNE_Loss=0.1090,local_loss: 0.1089731048664088,global_loss: 0.20871897041797638\n",
      "Q: tensor([0.0293, 0.0230, 0.0143, 0.0118, 0.0220, 0.0123, 0.0210, 0.0131, 0.0156,\n",
      "        0.0198, 0.0133, 0.0304, 0.0337, 0.0110, 0.0094, 0.0327, 0.0257, 0.0251,\n",
      "        0.0374, 0.0225, 0.0210, 0.0035, 0.0327, 0.0251, 0.0203, 0.0321, 0.0667,\n",
      "        0.0111, 0.0065, 0.0064, 0.0219, 0.0597, 0.0132, 0.0110, 0.0087, 0.0214,\n",
      "        0.0105, 0.0133, 0.0040, 0.0054, 0.0258, 0.0212, 0.0077, 0.0238, 0.0414,\n",
      "        0.0179, 0.0035, 0.0194, 0.0052, 0.0161], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 99: BSNE_Loss=0.1242,local_loss: 0.12420282482171854,global_loss: 0.20694711804389954\n",
      "Q: tensor([0.0954, 0.0086, 0.0061, 0.0036, 0.0141, 0.0865, 0.0120, 0.0177, 0.0069,\n",
      "        0.0144, 0.0047, 0.0081, 0.0152, 0.0044, 0.0109, 0.0061, 0.0223, 0.0298,\n",
      "        0.0270, 0.0166, 0.0098, 0.0148, 0.0348, 0.0287, 0.0096, 0.0247, 0.0230,\n",
      "        0.0127, 0.0106, 0.0118, 0.0105, 0.0129, 0.0680, 0.0155, 0.0074, 0.0307,\n",
      "        0.0063, 0.0186, 0.0248, 0.0083, 0.0134, 0.0070, 0.0405, 0.0229, 0.0241,\n",
      "        0.0092, 0.0107, 0.0436, 0.0274, 0.0074], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 100: BSNE_Loss=0.2135,local_loss: 0.21352639860770886,global_loss: -0.010439638048410416\n",
      "Q: tensor([0.0119, 0.0310, 0.0124, 0.0211, 0.0218, 0.0791, 0.0233, 0.0092, 0.0131,\n",
      "        0.0289, 0.0042, 0.0130, 0.0549, 0.0146, 0.0014, 0.0277, 0.0104, 0.0202,\n",
      "        0.0099, 0.0318, 0.0063, 0.0168, 0.0047, 0.0086, 0.0096, 0.0075, 0.0204,\n",
      "        0.0132, 0.0301, 0.0109, 0.0468, 0.0199, 0.0121, 0.0354, 0.0275, 0.0093,\n",
      "        0.0165, 0.0453, 0.0074, 0.0116, 0.0325, 0.0144, 0.0182, 0.0156, 0.0072,\n",
      "        0.0109, 0.0035, 0.0759, 0.0153, 0.0065], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 101: BSNE_Loss=0.2017,local_loss: 0.20165252865892747,global_loss: -0.14637677371501923\n",
      "Q: tensor([0.0805, 0.0204, 0.0263, 0.0137, 0.0109, 0.0259, 0.0346, 0.0202, 0.0073,\n",
      "        0.0048, 0.0150, 0.0060, 0.0133, 0.0072, 0.0066, 0.0076, 0.0121, 0.0378,\n",
      "        0.0121, 0.0572, 0.0174, 0.0125, 0.0207, 0.0359, 0.0267, 0.0066, 0.0228,\n",
      "        0.0167, 0.0185, 0.0176, 0.0084, 0.0349, 0.0063, 0.0256, 0.0184, 0.0160,\n",
      "        0.0126, 0.0377, 0.0065, 0.0174, 0.0117, 0.0484, 0.0460, 0.0179, 0.0278,\n",
      "        0.0075, 0.0059, 0.0121, 0.0146, 0.0094], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 102: BSNE_Loss=0.1427,local_loss: 0.14268739506367514,global_loss: 0.07915102690458298\n",
      "Q: tensor([0.0051, 0.0334, 0.0264, 0.0488, 0.0159, 0.0262, 0.0138, 0.0123, 0.0085,\n",
      "        0.0457, 0.0095, 0.0145, 0.0179, 0.0055, 0.0041, 0.0061, 0.0049, 0.0098,\n",
      "        0.0160, 0.0225, 0.0190, 0.0652, 0.0087, 0.0060, 0.0238, 0.0205, 0.0089,\n",
      "        0.0068, 0.0198, 0.0092, 0.0376, 0.0983, 0.0086, 0.0266, 0.0335, 0.0228,\n",
      "        0.0196, 0.0073, 0.0266, 0.0082, 0.0078, 0.0555, 0.0138, 0.0245, 0.0115,\n",
      "        0.0126, 0.0032, 0.0211, 0.0161, 0.0100], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 103: BSNE_Loss=0.2075,local_loss: 0.2075189490192501,global_loss: 0.2929997146129608\n",
      "Q: tensor([0.0161, 0.0118, 0.0299, 0.0053, 0.0090, 0.0143, 0.0336, 0.0168, 0.0297,\n",
      "        0.0270, 0.0142, 0.0101, 0.0082, 0.0075, 0.0044, 0.0166, 0.0072, 0.0277,\n",
      "        0.0188, 0.0099, 0.0185, 0.0132, 0.0154, 0.0227, 0.0123, 0.0589, 0.0098,\n",
      "        0.0100, 0.0928, 0.0053, 0.0056, 0.0349, 0.0715, 0.0048, 0.0134, 0.0050,\n",
      "        0.0161, 0.0076, 0.0398, 0.0230, 0.0116, 0.0095, 0.0398, 0.0132, 0.0100,\n",
      "        0.0048, 0.0023, 0.0583, 0.0138, 0.0379], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 104: BSNE_Loss=0.2379,local_loss: 0.2378947563144757,global_loss: -0.077633336186409\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Q: tensor([0.0117, 0.0192, 0.0062, 0.0088, 0.0446, 0.0431, 0.0077, 0.0186, 0.0189,\n",
      "        0.0157, 0.0973, 0.0143, 0.0066, 0.0136, 0.0093, 0.0138, 0.0063, 0.0094,\n",
      "        0.0139, 0.0067, 0.0029, 0.0212, 0.0045, 0.0220, 0.0314, 0.0626, 0.0177,\n",
      "        0.0245, 0.0110, 0.0123, 0.0317, 0.0121, 0.0165, 0.0445, 0.0180, 0.0123,\n",
      "        0.0263, 0.0062, 0.0243, 0.0123, 0.0166, 0.0122, 0.0108, 0.0583, 0.0103,\n",
      "        0.0145, 0.0097, 0.0280, 0.0125, 0.0267], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 105: BSNE_Loss=0.1715,local_loss: 0.1715058126031633,global_loss: 0.37210071086883545\n",
      "Q: tensor([0.0239, 0.0121, 0.0151, 0.0111, 0.0028, 0.0310, 0.0049, 0.0096, 0.0101,\n",
      "        0.0202, 0.0081, 0.0103, 0.0365, 0.0434, 0.0114, 0.0375, 0.0260, 0.0173,\n",
      "        0.0454, 0.0392, 0.0205, 0.0157, 0.0098, 0.0885, 0.0177, 0.0205, 0.0141,\n",
      "        0.0155, 0.0090, 0.0074, 0.0109, 0.0496, 0.0094, 0.0080, 0.0692, 0.0088,\n",
      "        0.0223, 0.0130, 0.0156, 0.0083, 0.0317, 0.0150, 0.0165, 0.0091, 0.0143,\n",
      "        0.0197, 0.0123, 0.0123, 0.0129, 0.0068], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 106: BSNE_Loss=0.1583,local_loss: 0.1583039024913276,global_loss: 0.2923692464828491\n",
      "Q: tensor([0.0106, 0.0816, 0.0124, 0.0044, 0.0095, 0.0056, 0.0286, 0.0043, 0.0252,\n",
      "        0.0224, 0.0086, 0.0268, 0.0031, 0.0078, 0.0324, 0.0133, 0.0126, 0.0205,\n",
      "        0.0157, 0.0397, 0.0099, 0.0065, 0.0030, 0.0202, 0.0078, 0.0348, 0.0111,\n",
      "        0.0231, 0.0089, 0.0288, 0.0272, 0.0551, 0.0080, 0.0533, 0.0088, 0.0153,\n",
      "        0.0093, 0.0528, 0.0053, 0.0302, 0.0167, 0.0397, 0.0036, 0.0104, 0.0111,\n",
      "        0.0044, 0.0239, 0.0647, 0.0123, 0.0088], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 107: BSNE_Loss=0.2510,local_loss: 0.25100554989831203,global_loss: 0.06602407991886139\n",
      "Q: tensor([0.0269, 0.0215, 0.0301, 0.0153, 0.0066, 0.0622, 0.0229, 0.0105, 0.0075,\n",
      "        0.0225, 0.0093, 0.0047, 0.0091, 0.0056, 0.0142, 0.0063, 0.0795, 0.0100,\n",
      "        0.0169, 0.0195, 0.0336, 0.0074, 0.0156, 0.0095, 0.0195, 0.0274, 0.0262,\n",
      "        0.0107, 0.0151, 0.0345, 0.0493, 0.0086, 0.0512, 0.0071, 0.0087, 0.0072,\n",
      "        0.0328, 0.0134, 0.0139, 0.0055, 0.0157, 0.0191, 0.0170, 0.0057, 0.0189,\n",
      "        0.0482, 0.0310, 0.0120, 0.0138, 0.0199], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 108: BSNE_Loss=0.1597,local_loss: 0.1596639762240641,global_loss: 0.07495668530464172\n",
      "Q: tensor([0.0057, 0.0080, 0.0156, 0.0179, 0.0326, 0.0057, 0.0626, 0.0270, 0.0196,\n",
      "        0.0177, 0.0784, 0.0137, 0.0282, 0.0370, 0.0154, 0.0167, 0.0219, 0.0106,\n",
      "        0.0127, 0.0155, 0.0132, 0.0073, 0.0320, 0.0185, 0.0158, 0.0143, 0.0078,\n",
      "        0.0059, 0.0128, 0.0071, 0.0149, 0.0097, 0.0078, 0.0199, 0.0251, 0.0165,\n",
      "        0.0211, 0.0168, 0.0126, 0.0183, 0.0093, 0.0349, 0.0443, 0.0438, 0.0210,\n",
      "        0.0129, 0.0248, 0.0172, 0.0188, 0.0131], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 109: BSNE_Loss=0.0976,local_loss: 0.09764431437302337,global_loss: 0.34395086765289307\n",
      "Q: tensor([0.0147, 0.0324, 0.0070, 0.0036, 0.0155, 0.0055, 0.0365, 0.0572, 0.0105,\n",
      "        0.0302, 0.0248, 0.0398, 0.0056, 0.0098, 0.0136, 0.0143, 0.0129, 0.0313,\n",
      "        0.0089, 0.0142, 0.0120, 0.0088, 0.0223, 0.0063, 0.0254, 0.0187, 0.0102,\n",
      "        0.0255, 0.0159, 0.0207, 0.0149, 0.0172, 0.0188, 0.0112, 0.0172, 0.0182,\n",
      "        0.0124, 0.0475, 0.0253, 0.0272, 0.0284, 0.0286, 0.0066, 0.0403, 0.0190,\n",
      "        0.0096, 0.0177, 0.0183, 0.0564, 0.0113], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 110: BSNE_Loss=0.0997,local_loss: 0.09972391902290209,global_loss: 0.15866371989250183\n",
      "Q: tensor([0.0171, 0.0281, 0.0055, 0.0091, 0.0255, 0.0082, 0.0370, 0.0192, 0.0089,\n",
      "        0.0301, 0.0107, 0.0465, 0.0090, 0.0243, 0.0109, 0.0162, 0.0508, 0.0079,\n",
      "        0.0113, 0.0141, 0.0114, 0.0096, 0.0251, 0.0199, 0.0164, 0.0151, 0.0283,\n",
      "        0.0498, 0.0186, 0.0272, 0.0224, 0.0087, 0.0283, 0.0135, 0.0220, 0.0127,\n",
      "        0.0433, 0.0300, 0.0810, 0.0235, 0.0062, 0.0222, 0.0095, 0.0059, 0.0177,\n",
      "        0.0102, 0.0067, 0.0135, 0.0028, 0.0081], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 111: BSNE_Loss=0.1391,local_loss: 0.1391116044817223,global_loss: 0.3801029324531555\n",
      "Q: tensor([0.0038, 0.0085, 0.0194, 0.0142, 0.0312, 0.0134, 0.0522, 0.0216, 0.0274,\n",
      "        0.0335, 0.0166, 0.0075, 0.0137, 0.0133, 0.0129, 0.0586, 0.0164, 0.0344,\n",
      "        0.0611, 0.0369, 0.0156, 0.0123, 0.0108, 0.0229, 0.0117, 0.0136, 0.0055,\n",
      "        0.0188, 0.0209, 0.0053, 0.0060, 0.0122, 0.0388, 0.0150, 0.0160, 0.0126,\n",
      "        0.0129, 0.0789, 0.0077, 0.0091, 0.0130, 0.0158, 0.0253, 0.0073, 0.0122,\n",
      "        0.0179, 0.0110, 0.0238, 0.0182, 0.0122], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 112: BSNE_Loss=0.1345,local_loss: 0.13448216842836822,global_loss: 0.2695518732070923\n",
      "Q: tensor([0.0129, 0.0171, 0.0409, 0.0389, 0.0208, 0.0141, 0.0055, 0.0136, 0.0166,\n",
      "        0.0081, 0.0068, 0.0044, 0.0521, 0.0021, 0.0042, 0.0078, 0.0059, 0.0758,\n",
      "        0.0264, 0.0059, 0.0746, 0.0275, 0.0081, 0.0061, 0.0229, 0.0141, 0.0098,\n",
      "        0.0198, 0.0111, 0.0054, 0.0452, 0.0137, 0.0176, 0.0050, 0.0115, 0.0067,\n",
      "        0.0301, 0.0072, 0.0109, 0.0156, 0.0191, 0.0062, 0.0414, 0.0148, 0.0564,\n",
      "        0.0212, 0.0164, 0.0504, 0.0243, 0.0068], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 113: BSNE_Loss=0.2538,local_loss: 0.25377674843831594,global_loss: 0.27847743034362793\n",
      "Q: tensor([0.0165, 0.0122, 0.0179, 0.0634, 0.0339, 0.0175, 0.0074, 0.0104, 0.0274,\n",
      "        0.0100, 0.0076, 0.0096, 0.0126, 0.0081, 0.0547, 0.0051, 0.0110, 0.0222,\n",
      "        0.0068, 0.0111, 0.0076, 0.0234, 0.0259, 0.0453, 0.0338, 0.0060, 0.0581,\n",
      "        0.0367, 0.0201, 0.0083, 0.0089, 0.0231, 0.0163, 0.0123, 0.0178, 0.0103,\n",
      "        0.0046, 0.0109, 0.0302, 0.0239, 0.0076, 0.0063, 0.0255, 0.0110, 0.0212,\n",
      "        0.0064, 0.0083, 0.0559, 0.0557, 0.0130], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 114: BSNE_Loss=0.1746,local_loss: 0.17458877329414021,global_loss: 0.1567368358373642\n",
      "Q: tensor([0.0580, 0.0129, 0.0363, 0.0220, 0.0036, 0.0359, 0.0110, 0.0201, 0.0231,\n",
      "        0.0541, 0.0074, 0.0122, 0.0034, 0.0327, 0.0325, 0.0159, 0.0154, 0.0269,\n",
      "        0.0070, 0.0164, 0.0077, 0.0070, 0.0044, 0.0113, 0.0072, 0.0299, 0.0328,\n",
      "        0.0304, 0.0113, 0.0107, 0.0199, 0.0136, 0.0037, 0.0087, 0.0208, 0.0085,\n",
      "        0.0063, 0.0263, 0.0095, 0.0165, 0.0212, 0.0188, 0.0142, 0.0189, 0.0334,\n",
      "        0.0258, 0.0133, 0.0692, 0.0395, 0.0125], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 115: BSNE_Loss=0.1599,local_loss: 0.15990392108125595,global_loss: 0.40876373648643494\n",
      "Q: tensor([0.0114, 0.0218, 0.0109, 0.0068, 0.0444, 0.0147, 0.0064, 0.0294, 0.0109,\n",
      "        0.0197, 0.0342, 0.0121, 0.0211, 0.0778, 0.0162, 0.0395, 0.0111, 0.0368,\n",
      "        0.0069, 0.0195, 0.0060, 0.0260, 0.0032, 0.0242, 0.0049, 0.0137, 0.0215,\n",
      "        0.0152, 0.0225, 0.0098, 0.0419, 0.0333, 0.0307, 0.0122, 0.0130, 0.0504,\n",
      "        0.0175, 0.0302, 0.0045, 0.0170, 0.0094, 0.0041, 0.0238, 0.0084, 0.0167,\n",
      "        0.0085, 0.0302, 0.0238, 0.0153, 0.0104], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 116: BSNE_Loss=0.1460,local_loss: 0.14598403041208247,global_loss: 0.22361265122890472\n",
      "Q: tensor([0.0386, 0.0185, 0.0216, 0.0089, 0.0260, 0.0198, 0.0269, 0.0023, 0.0074,\n",
      "        0.0129, 0.0116, 0.0072, 0.0206, 0.0653, 0.0226, 0.0067, 0.0091, 0.0082,\n",
      "        0.0383, 0.0581, 0.0217, 0.0114, 0.0076, 0.0518, 0.0100, 0.0170, 0.0212,\n",
      "        0.0694, 0.0103, 0.0122, 0.0086, 0.0079, 0.0255, 0.0098, 0.0113, 0.0117,\n",
      "        0.0360, 0.0281, 0.0197, 0.0125, 0.0147, 0.0204, 0.0298, 0.0043, 0.0075,\n",
      "        0.0413, 0.0138, 0.0181, 0.0136, 0.0021], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 117: BSNE_Loss=0.1852,local_loss: 0.18518282762417376,global_loss: 0.013852526433765888\n",
      "Q: tensor([0.0246, 0.0117, 0.0210, 0.0083, 0.0141, 0.0051, 0.0092, 0.0151, 0.0292,\n",
      "        0.0265, 0.0478, 0.0144, 0.0209, 0.0078, 0.0227, 0.0112, 0.0301, 0.0103,\n",
      "        0.0131, 0.0151, 0.0252, 0.0322, 0.0089, 0.0253, 0.0362, 0.0060, 0.0374,\n",
      "        0.0086, 0.0283, 0.0216, 0.0300, 0.0182, 0.0107, 0.0135, 0.0643, 0.0219,\n",
      "        0.0180, 0.0184, 0.0449, 0.0206, 0.0311, 0.0079, 0.0124, 0.0206, 0.0079,\n",
      "        0.0134, 0.0110, 0.0243, 0.0152, 0.0080], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 118: BSNE_Loss=0.0768,local_loss: 0.07683355973714746,global_loss: 0.11466921120882034\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Q: tensor([0.0121, 0.0166, 0.0070, 0.0141, 0.0242, 0.0156, 0.0153, 0.0066, 0.0506,\n",
      "        0.0365, 0.0128, 0.0164, 0.0254, 0.0415, 0.0088, 0.0032, 0.0095, 0.0064,\n",
      "        0.0133, 0.0201, 0.0145, 0.0059, 0.0274, 0.0052, 0.0131, 0.0176, 0.0208,\n",
      "        0.0764, 0.0522, 0.0563, 0.0184, 0.0033, 0.0192, 0.0323, 0.0232, 0.0121,\n",
      "        0.0109, 0.0124, 0.0239, 0.0162, 0.0155, 0.0098, 0.0493, 0.0208, 0.0084,\n",
      "        0.0218, 0.0225, 0.0191, 0.0058, 0.0095], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 119: BSNE_Loss=0.1565,local_loss: 0.15652939588637912,global_loss: 0.18955403566360474\n",
      "Q: tensor([0.0146, 0.0352, 0.0133, 0.0046, 0.0114, 0.0110, 0.0121, 0.0329, 0.0231,\n",
      "        0.0133, 0.0056, 0.0282, 0.0815, 0.0067, 0.0582, 0.0808, 0.0089, 0.0134,\n",
      "        0.0155, 0.0372, 0.0127, 0.0086, 0.0214, 0.0069, 0.0092, 0.0118, 0.0097,\n",
      "        0.0316, 0.0306, 0.0433, 0.0157, 0.0253, 0.0089, 0.0131, 0.0200, 0.0131,\n",
      "        0.0101, 0.0169, 0.0094, 0.0241, 0.0176, 0.0083, 0.0156, 0.0192, 0.0052,\n",
      "        0.0062, 0.0386, 0.0187, 0.0069, 0.0136], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 120: BSNE_Loss=0.1661,local_loss: 0.16614804312714893,global_loss: 0.5070472955703735\n",
      "Q: tensor([0.0227, 0.0162, 0.0341, 0.0096, 0.0104, 0.0171, 0.0176, 0.0335, 0.0089,\n",
      "        0.0079, 0.0097, 0.0070, 0.0152, 0.0202, 0.0334, 0.0104, 0.0308, 0.0149,\n",
      "        0.0045, 0.0102, 0.0241, 0.0036, 0.0173, 0.0225, 0.0184, 0.0220, 0.0146,\n",
      "        0.0588, 0.0367, 0.0256, 0.0046, 0.0300, 0.0142, 0.0110, 0.0579, 0.0170,\n",
      "        0.0125, 0.0085, 0.1390, 0.0081, 0.0276, 0.0165, 0.0053, 0.0042, 0.0031,\n",
      "        0.0073, 0.0098, 0.0318, 0.0044, 0.0093], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 121: BSNE_Loss=0.2395,local_loss: 0.23946048781875007,global_loss: 0.4081285893917084\n",
      "Q: tensor([0.0122, 0.0257, 0.0152, 0.0133, 0.0259, 0.0083, 0.0109, 0.0157, 0.0136,\n",
      "        0.0272, 0.0189, 0.0079, 0.0210, 0.0104, 0.0303, 0.0168, 0.0390, 0.0367,\n",
      "        0.0100, 0.0114, 0.0295, 0.0094, 0.0253, 0.0107, 0.0219, 0.0507, 0.0065,\n",
      "        0.0152, 0.0046, 0.0203, 0.0614, 0.0139, 0.0163, 0.0279, 0.0505, 0.0276,\n",
      "        0.0119, 0.0281, 0.0309, 0.0121, 0.0274, 0.0050, 0.0323, 0.0084, 0.0098,\n",
      "        0.0207, 0.0192, 0.0127, 0.0150, 0.0043], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 122: BSNE_Loss=0.0963,local_loss: 0.09630787372662683,global_loss: 0.14246679842472076\n",
      "Q: tensor([0.0192, 0.0202, 0.0151, 0.0111, 0.0188, 0.0064, 0.0133, 0.0054, 0.0245,\n",
      "        0.0149, 0.0223, 0.0577, 0.0149, 0.0079, 0.0096, 0.0239, 0.0145, 0.0127,\n",
      "        0.0067, 0.0097, 0.0080, 0.0023, 0.0134, 0.0108, 0.0104, 0.0080, 0.0136,\n",
      "        0.0060, 0.0052, 0.0238, 0.0157, 0.0237, 0.0652, 0.0184, 0.0099, 0.0136,\n",
      "        0.0067, 0.0053, 0.0061, 0.0425, 0.0085, 0.0278, 0.0319, 0.0089, 0.1112,\n",
      "        0.0096, 0.0195, 0.1107, 0.0035, 0.0310], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 123: BSNE_Loss=0.2761,local_loss: 0.27612199800871096,global_loss: 0.046291664242744446\n",
      "Q: tensor([0.0329, 0.0096, 0.0495, 0.0090, 0.0106, 0.0095, 0.0088, 0.0182, 0.0231,\n",
      "        0.0090, 0.0144, 0.0184, 0.0107, 0.0118, 0.0103, 0.0277, 0.0092, 0.0097,\n",
      "        0.0189, 0.0276, 0.0110, 0.0075, 0.0054, 0.0081, 0.0185, 0.0397, 0.0298,\n",
      "        0.0025, 0.0200, 0.0135, 0.0151, 0.0413, 0.0094, 0.0186, 0.0096, 0.0195,\n",
      "        0.0202, 0.0162, 0.0550, 0.0113, 0.0241, 0.0635, 0.0159, 0.0289, 0.0714,\n",
      "        0.0276, 0.0084, 0.0319, 0.0041, 0.0129], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 124: BSNE_Loss=0.1514,local_loss: 0.15138344225334519,global_loss: 0.33288559317588806\n",
      "Q: tensor([0.0761, 0.0194, 0.0241, 0.0080, 0.0125, 0.0194, 0.0370, 0.0149, 0.0088,\n",
      "        0.0130, 0.0077, 0.0061, 0.0039, 0.0126, 0.0222, 0.0073, 0.0153, 0.0164,\n",
      "        0.0262, 0.0593, 0.0138, 0.0108, 0.0201, 0.0426, 0.0087, 0.0484, 0.0183,\n",
      "        0.0070, 0.0152, 0.0028, 0.0561, 0.0151, 0.0110, 0.0148, 0.0074, 0.0077,\n",
      "        0.0064, 0.0277, 0.0458, 0.0020, 0.0213, 0.0278, 0.0384, 0.0067, 0.0053,\n",
      "        0.0257, 0.0159, 0.0378, 0.0190, 0.0100], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 125: BSNE_Loss=0.2067,local_loss: 0.20674036812572,global_loss: 0.06790410727262497\n",
      "Q: tensor([0.0095, 0.0304, 0.0106, 0.0050, 0.0102, 0.0092, 0.0263, 0.0189, 0.0221,\n",
      "        0.0088, 0.0113, 0.0230, 0.0328, 0.0047, 0.0135, 0.0071, 0.0219, 0.0129,\n",
      "        0.0062, 0.0245, 0.0204, 0.0528, 0.0136, 0.0360, 0.0075, 0.0444, 0.0189,\n",
      "        0.0312, 0.0037, 0.0426, 0.0067, 0.0082, 0.0263, 0.0162, 0.0086, 0.0613,\n",
      "        0.0019, 0.0074, 0.0074, 0.0431, 0.0075, 0.0744, 0.0253, 0.0101, 0.0182,\n",
      "        0.0581, 0.0076, 0.0064, 0.0183, 0.0071], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 126: BSNE_Loss=0.2280,local_loss: 0.2279503376594979,global_loss: 0.5500493049621582\n",
      "Q: tensor([0.0346, 0.0140, 0.0252, 0.0682, 0.0341, 0.0061, 0.0094, 0.0307, 0.0131,\n",
      "        0.0150, 0.0377, 0.0300, 0.0171, 0.0252, 0.0228, 0.0104, 0.0122, 0.0122,\n",
      "        0.0256, 0.0137, 0.0054, 0.0072, 0.0125, 0.0071, 0.0094, 0.0206, 0.0171,\n",
      "        0.0416, 0.0177, 0.0047, 0.0307, 0.0867, 0.0491, 0.0103, 0.0392, 0.0178,\n",
      "        0.0185, 0.0078, 0.0191, 0.0173, 0.0058, 0.0137, 0.0071, 0.0054, 0.0095,\n",
      "        0.0151, 0.0154, 0.0131, 0.0069, 0.0108], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 127: BSNE_Loss=0.1574,local_loss: 0.15741982744219737,global_loss: 0.188295379281044\n",
      "Q: tensor([0.0050, 0.0280, 0.0581, 0.0280, 0.0121, 0.0068, 0.0173, 0.0109, 0.0171,\n",
      "        0.0206, 0.0218, 0.0029, 0.0241, 0.0128, 0.0033, 0.0145, 0.0131, 0.0198,\n",
      "        0.0118, 0.0362, 0.0745, 0.0077, 0.0041, 0.0151, 0.0100, 0.0164, 0.0431,\n",
      "        0.0384, 0.0200, 0.0086, 0.0119, 0.0289, 0.0061, 0.0155, 0.0123, 0.0197,\n",
      "        0.0521, 0.0137, 0.0128, 0.0153, 0.0296, 0.0342, 0.0208, 0.0147, 0.0352,\n",
      "        0.0180, 0.0068, 0.0198, 0.0174, 0.0130], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 128: BSNE_Loss=0.1428,local_loss: 0.1428217731235055,global_loss: 0.46589869260787964\n",
      "Q: tensor([0.0367, 0.0053, 0.0277, 0.0082, 0.0139, 0.0180, 0.0148, 0.0148, 0.0161,\n",
      "        0.0367, 0.0030, 0.0076, 0.0094, 0.0212, 0.0082, 0.0096, 0.0203, 0.0152,\n",
      "        0.0225, 0.0067, 0.0094, 0.0076, 0.0341, 0.0265, 0.0213, 0.0232, 0.0203,\n",
      "        0.0303, 0.0095, 0.0088, 0.0074, 0.0487, 0.0964, 0.0685, 0.0179, 0.0289,\n",
      "        0.0057, 0.0109, 0.0204, 0.0283, 0.0210, 0.0141, 0.0179, 0.0049, 0.0126,\n",
      "        0.0083, 0.0305, 0.0183, 0.0107, 0.0216], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 129: BSNE_Loss=0.1595,local_loss: 0.1595264267483461,global_loss: 0.21980154514312744\n",
      "Q: tensor([0.0091, 0.0081, 0.0090, 0.0083, 0.0065, 0.0138, 0.0250, 0.0184, 0.0172,\n",
      "        0.0074, 0.0251, 0.0117, 0.0473, 0.0049, 0.0318, 0.0360, 0.0127, 0.0422,\n",
      "        0.0119, 0.0107, 0.0095, 0.0122, 0.0043, 0.0223, 0.0157, 0.0229, 0.0110,\n",
      "        0.0045, 0.0255, 0.0233, 0.0095, 0.0323, 0.0078, 0.0172, 0.0405, 0.0109,\n",
      "        0.0955, 0.0155, 0.0134, 0.0274, 0.0295, 0.0117, 0.0145, 0.0289, 0.0420,\n",
      "        0.0223, 0.0041, 0.0141, 0.0476, 0.0070], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 130: BSNE_Loss=0.1651,local_loss: 0.16509779264836622,global_loss: 0.2633192837238312\n",
      "Q: tensor([0.0103, 0.0115, 0.0425, 0.0189, 0.0246, 0.0220, 0.0989, 0.0103, 0.0220,\n",
      "        0.0234, 0.0407, 0.0283, 0.0037, 0.0132, 0.0075, 0.0048, 0.0143, 0.0143,\n",
      "        0.0699, 0.0365, 0.0138, 0.0184, 0.0365, 0.0111, 0.0162, 0.0081, 0.0387,\n",
      "        0.0082, 0.0175, 0.0072, 0.0220, 0.0199, 0.0204, 0.0196, 0.0194, 0.0028,\n",
      "        0.0150, 0.0037, 0.0048, 0.0276, 0.0097, 0.0115, 0.0150, 0.0107, 0.0077,\n",
      "        0.0359, 0.0101, 0.0243, 0.0106, 0.0158], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 131: BSNE_Loss=0.1800,local_loss: 0.18000402449257166,global_loss: 0.31668388843536377\n",
      "Q: tensor([0.0765, 0.0196, 0.0222, 0.0213, 0.0220, 0.0081, 0.0252, 0.0453, 0.0276,\n",
      "        0.0127, 0.0073, 0.0237, 0.0052, 0.0164, 0.0070, 0.0123, 0.0060, 0.0289,\n",
      "        0.0102, 0.0089, 0.0050, 0.0079, 0.0284, 0.0073, 0.0150, 0.0160, 0.0116,\n",
      "        0.0241, 0.0163, 0.0459, 0.0084, 0.0146, 0.0519, 0.0133, 0.0181, 0.0124,\n",
      "        0.0053, 0.0132, 0.0449, 0.0299, 0.0144, 0.0126, 0.0360, 0.0271, 0.0253,\n",
      "        0.0206, 0.0123, 0.0212, 0.0244, 0.0103], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 132: BSNE_Loss=0.1201,local_loss: 0.12011428638717815,global_loss: 0.3852836787700653\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Q: tensor([0.0063, 0.0314, 0.0299, 0.0051, 0.0071, 0.0109, 0.0109, 0.0482, 0.0265,\n",
      "        0.0217, 0.0299, 0.0044, 0.0105, 0.0054, 0.0113, 0.0340, 0.0248, 0.0086,\n",
      "        0.0164, 0.0089, 0.0079, 0.0176, 0.0285, 0.0317, 0.0120, 0.0347, 0.0043,\n",
      "        0.0470, 0.0061, 0.0042, 0.0443, 0.0299, 0.0111, 0.0045, 0.0207, 0.0423,\n",
      "        0.0062, 0.0142, 0.0416, 0.0079, 0.0959, 0.0099, 0.0080, 0.0313, 0.0126,\n",
      "        0.0229, 0.0072, 0.0113, 0.0171, 0.0147], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 133: BSNE_Loss=0.2139,local_loss: 0.21391871080719044,global_loss: 0.06928007304668427\n",
      "Q: tensor([0.0215, 0.0245, 0.0142, 0.0158, 0.0276, 0.0058, 0.0204, 0.0138, 0.0273,\n",
      "        0.0061, 0.0213, 0.0165, 0.0197, 0.0233, 0.0098, 0.0227, 0.0295, 0.0113,\n",
      "        0.0146, 0.0183, 0.0389, 0.0208, 0.0577, 0.0074, 0.0483, 0.0096, 0.0353,\n",
      "        0.0123, 0.0113, 0.0517, 0.0136, 0.0054, 0.0374, 0.0076, 0.0246, 0.0209,\n",
      "        0.0255, 0.0089, 0.0256, 0.0063, 0.0189, 0.0037, 0.0321, 0.0277, 0.0063,\n",
      "        0.0101, 0.0089, 0.0253, 0.0212, 0.0128], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 134: BSNE_Loss=0.1023,local_loss: 0.10234715873196702,global_loss: 0.19248737394809723\n",
      "Q: tensor([0.0275, 0.0052, 0.0101, 0.0161, 0.0232, 0.0388, 0.0093, 0.0374, 0.0098,\n",
      "        0.0230, 0.0068, 0.0133, 0.0088, 0.0193, 0.0138, 0.0185, 0.0154, 0.0192,\n",
      "        0.0149, 0.0624, 0.0187, 0.0098, 0.1124, 0.0121, 0.0204, 0.0084, 0.0070,\n",
      "        0.0239, 0.0070, 0.0130, 0.0233, 0.0078, 0.0076, 0.0142, 0.0073, 0.0349,\n",
      "        0.1047, 0.0107, 0.0035, 0.0080, 0.0162, 0.0134, 0.0152, 0.0054, 0.0127,\n",
      "        0.0088, 0.0656, 0.0028, 0.0077, 0.0042], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 135: BSNE_Loss=0.2720,local_loss: 0.2720418904018134,global_loss: 0.006014107260853052\n",
      "Q: tensor([0.0294, 0.0039, 0.0118, 0.0141, 0.0322, 0.0371, 0.0417, 0.0091, 0.0064,\n",
      "        0.0261, 0.0106, 0.0098, 0.0302, 0.0324, 0.0249, 0.0137, 0.0136, 0.0066,\n",
      "        0.0033, 0.0262, 0.0170, 0.0208, 0.0174, 0.0113, 0.0072, 0.0226, 0.0114,\n",
      "        0.0147, 0.0133, 0.0062, 0.0931, 0.0560, 0.0078, 0.0169, 0.0576, 0.0485,\n",
      "        0.0172, 0.0228, 0.0054, 0.0043, 0.0047, 0.0336, 0.0140, 0.0126, 0.0154,\n",
      "        0.0234, 0.0030, 0.0023, 0.0336, 0.0032], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 136: BSNE_Loss=0.2471,local_loss: 0.2470866462060814,global_loss: 0.21100448071956635\n",
      "Q: tensor([0.0390, 0.0091, 0.0184, 0.0157, 0.0043, 0.0218, 0.0268, 0.0067, 0.0189,\n",
      "        0.0037, 0.0132, 0.0247, 0.0083, 0.0131, 0.0212, 0.0023, 0.0139, 0.0220,\n",
      "        0.0073, 0.0385, 0.0262, 0.0075, 0.0031, 0.0027, 0.0140, 0.0103, 0.0056,\n",
      "        0.0122, 0.0350, 0.0103, 0.0961, 0.0182, 0.0066, 0.0098, 0.0094, 0.0249,\n",
      "        0.0203, 0.2157, 0.0138, 0.0044, 0.0093, 0.0084, 0.0215, 0.0142, 0.0035,\n",
      "        0.0215, 0.0095, 0.0143, 0.0136, 0.0090], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 137: BSNE_Loss=0.3636,local_loss: 0.3636011839107421,global_loss: 0.15724536776542664\n",
      "Q: tensor([0.0065, 0.0127, 0.0093, 0.0256, 0.0041, 0.0092, 0.0310, 0.0163, 0.0418,\n",
      "        0.0130, 0.0392, 0.0099, 0.0073, 0.0884, 0.0074, 0.0409, 0.0395, 0.0117,\n",
      "        0.0372, 0.0076, 0.0114, 0.0058, 0.0348, 0.0096, 0.0191, 0.0106, 0.0043,\n",
      "        0.0126, 0.0394, 0.0216, 0.0184, 0.0228, 0.0164, 0.0234, 0.0207, 0.0137,\n",
      "        0.0392, 0.0046, 0.0044, 0.0085, 0.0374, 0.0250, 0.0197, 0.0040, 0.0054,\n",
      "        0.0143, 0.0042, 0.0145, 0.0090, 0.0667], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 138: BSNE_Loss=0.2226,local_loss: 0.22258451468661014,global_loss: 0.15277600288391113\n",
      "Q: tensor([0.0395, 0.0176, 0.0522, 0.0047, 0.0270, 0.0367, 0.0116, 0.0111, 0.0469,\n",
      "        0.0296, 0.0019, 0.0588, 0.0107, 0.0122, 0.0148, 0.0088, 0.0143, 0.0132,\n",
      "        0.0138, 0.0330, 0.0083, 0.0085, 0.0065, 0.0096, 0.0205, 0.0205, 0.0065,\n",
      "        0.0310, 0.0079, 0.0172, 0.0226, 0.0117, 0.0035, 0.0238, 0.0091, 0.0125,\n",
      "        0.0095, 0.0325, 0.0102, 0.0142, 0.0053, 0.0612, 0.0197, 0.0229, 0.0206,\n",
      "        0.0366, 0.0176, 0.0080, 0.0348, 0.0289], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 139: BSNE_Loss=0.1662,local_loss: 0.1661594174706707,global_loss: 0.42027896642684937\n",
      "Q: tensor([0.0544, 0.0180, 0.0094, 0.0080, 0.0122, 0.0060, 0.0383, 0.0576, 0.0082,\n",
      "        0.0092, 0.0027, 0.0223, 0.0097, 0.0297, 0.0132, 0.0049, 0.0104, 0.0111,\n",
      "        0.0069, 0.0262, 0.0093, 0.0102, 0.0148, 0.0105, 0.0243, 0.0213, 0.0200,\n",
      "        0.0647, 0.0108, 0.0116, 0.0036, 0.0446, 0.0333, 0.0089, 0.0204, 0.0312,\n",
      "        0.0277, 0.0171, 0.0229, 0.0099, 0.0292, 0.0454, 0.0210, 0.0041, 0.0095,\n",
      "        0.0574, 0.0099, 0.0335, 0.0041, 0.0104], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 140: BSNE_Loss=0.2017,local_loss: 0.2016594111452541,global_loss: 0.13141842186450958\n",
      "Q: tensor([0.0258, 0.0096, 0.0162, 0.0097, 0.0325, 0.0049, 0.0215, 0.0223, 0.0744,\n",
      "        0.0306, 0.0179, 0.0095, 0.0070, 0.0391, 0.0116, 0.0088, 0.0069, 0.0335,\n",
      "        0.0289, 0.0128, 0.0101, 0.0372, 0.0105, 0.0119, 0.0320, 0.0159, 0.0215,\n",
      "        0.0120, 0.0282, 0.0162, 0.0191, 0.0512, 0.0331, 0.0303, 0.0040, 0.0172,\n",
      "        0.0258, 0.0297, 0.0112, 0.0090, 0.0272, 0.0102, 0.0132, 0.0115, 0.0252,\n",
      "        0.0114, 0.0109, 0.0081, 0.0157, 0.0166], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 141: BSNE_Loss=0.1007,local_loss: 0.10066848622410253,global_loss: 0.46525201201438904\n",
      "Q: tensor([0.0131, 0.0069, 0.0148, 0.0491, 0.0057, 0.0070, 0.0115, 0.0108, 0.0039,\n",
      "        0.0355, 0.0423, 0.0747, 0.0198, 0.0404, 0.0113, 0.0312, 0.0127, 0.0143,\n",
      "        0.0205, 0.0229, 0.0146, 0.0018, 0.0175, 0.0152, 0.0104, 0.0253, 0.0667,\n",
      "        0.0076, 0.0182, 0.0144, 0.0149, 0.0237, 0.0158, 0.0143, 0.0131, 0.0294,\n",
      "        0.0128, 0.0091, 0.0068, 0.0093, 0.0369, 0.0165, 0.0161, 0.0478, 0.0031,\n",
      "        0.0049, 0.0393, 0.0098, 0.0174, 0.0190], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 142: BSNE_Loss=0.1855,local_loss: 0.1854940858507777,global_loss: 0.24786537885665894\n",
      "Q: tensor([0.0225, 0.0134, 0.0367, 0.0088, 0.0523, 0.0222, 0.0186, 0.0099, 0.0157,\n",
      "        0.0213, 0.0130, 0.0125, 0.0045, 0.0527, 0.0133, 0.0094, 0.0077, 0.0180,\n",
      "        0.0027, 0.0413, 0.0466, 0.0073, 0.0363, 0.0089, 0.0213, 0.0118, 0.0214,\n",
      "        0.0520, 0.0392, 0.0124, 0.0069, 0.0093, 0.0043, 0.0090, 0.0534, 0.0097,\n",
      "        0.0111, 0.0231, 0.0334, 0.0208, 0.0299, 0.0315, 0.0195, 0.0023, 0.0230,\n",
      "        0.0267, 0.0097, 0.0106, 0.0076, 0.0045], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 143: BSNE_Loss=0.1854,local_loss: 0.18541692471337404,global_loss: 0.19685693085193634\n",
      "Q: tensor([0.0341, 0.0070, 0.0158, 0.0174, 0.0226, 0.0217, 0.0475, 0.0111, 0.0604,\n",
      "        0.0187, 0.0076, 0.0545, 0.0157, 0.0088, 0.0157, 0.0510, 0.0081, 0.0163,\n",
      "        0.0403, 0.0054, 0.0039, 0.0298, 0.0056, 0.0057, 0.0126, 0.0178, 0.0176,\n",
      "        0.0433, 0.0214, 0.0292, 0.0152, 0.0183, 0.0322, 0.0078, 0.0183, 0.0100,\n",
      "        0.0057, 0.0109, 0.0217, 0.0206, 0.0199, 0.0095, 0.0206, 0.0144, 0.0092,\n",
      "        0.0226, 0.0254, 0.0136, 0.0258, 0.0119], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "Step 144: BSNE_Loss=0.1205,local_loss: 0.1205179229917581,global_loss: 0.2780851125717163\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[31m---------------------------------------------------------------------------\u001b[39m",
      "\u001b[31mKeyboardInterrupt\u001b[39m                         Traceback (most recent call last)",
      "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[3]\u001b[39m\u001b[32m, line 157\u001b[39m\n\u001b[32m    155\u001b[39m bp_node_features = []\n\u001b[32m    156\u001b[39m \u001b[38;5;28;01mfor\u001b[39;00m node_idx \u001b[38;5;129;01min\u001b[39;00m bp_nodes:\n\u001b[32m--> \u001b[39m\u001b[32m157\u001b[39m     node_subgraph = \u001b[43mcreate_node_subgraph\u001b[49m\u001b[43m(\u001b[49m\u001b[43mnode_idx\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mfeat_data\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43medge_indexs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdevice\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m    158\u001b[39m     _, node_feature = bsne_model([node_subgraph])\n\u001b[32m    159\u001b[39m     bp_node_features.append(node_feature.squeeze(\u001b[32m0\u001b[39m))\n",
      "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[1]\u001b[39m\u001b[32m, line 70\u001b[39m, in \u001b[36mcreate_node_subgraph\u001b[39m\u001b[34m(node_idx, feat_data, edge_indexs, device)\u001b[39m\n\u001b[32m     68\u001b[39m sub_edge_index = []\n\u001b[32m     69\u001b[39m \u001b[38;5;28;01mfor\u001b[39;00m rel_idx \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mrange\u001b[39m(\u001b[38;5;28mlen\u001b[39m(edge_indexs)):\n\u001b[32m---> \u001b[39m\u001b[32m70\u001b[39m     edge_index = \u001b[43medge_indexs\u001b[49m\u001b[43m[\u001b[49m\u001b[43mrel_idx\u001b[49m\u001b[43m]\u001b[49m\u001b[43m[\u001b[49m\u001b[32;43m0\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m.\u001b[49m\u001b[43mcpu\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m.numpy()\n\u001b[32m     71\u001b[39m     mask = np.isin(edge_index[\u001b[32m0\u001b[39m], sub_nodes) & np.isin(edge_index[\u001b[32m1\u001b[39m], sub_nodes)\n\u001b[32m     72\u001b[39m     local_edges = edge_index[:, mask]\n",
      "\u001b[31mKeyboardInterrupt\u001b[39m: "
     ]
    }
   ],
   "source": [
    "# 进行完整测试（新）（每个step不重新采样子图）（不加入平方项）\n",
    "\n",
    "# 参数设置\n",
    "args = {\n",
    "    \"dataset\": \"amazon\",\n",
    "#     \"dataset\": \"yelp\",\n",
    "    \"batch_size\": 8,\n",
    "    \"sample_size\": 50,\n",
    "    \"weight_decay\": 0.00005,\n",
    "    \"emb_size\": 32,\n",
    "    \"pretrain_epochs\": 200,\n",
    "    \"pretrain_lr\": 0.00005, #0.0005，\n",
    "    \"finetune_lr\": 0.0005,\n",
    "    \"num_epochs\": 200,\n",
    "    \"pretrain_patience\": 20,\n",
    "    \"patience\": 30,\n",
    "    \"tsne_weight\": 0.3,\n",
    "    \"weight\": 0.6,\n",
    "    \"layers\": 7,\n",
    "    \"test_size\": 0.6,\n",
    "    \"val_size\": 0.5,\n",
    "    \"layers_tree\": 7,\n",
    "    \"seed\": 76,\n",
    "    \"num_heads\": 4,\n",
    "    \"num_layers\": 4,\n",
    "    \"drop_rate\": 0.5\n",
    "}\n",
    "\n",
    "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "print(device)\n",
    "\n",
    "timestamp = time.strftime(\"%Y%m%d-%H%M%S\")\n",
    "writer = SummaryWriter(f'runs/{args[\"dataset\"]}_{timestamp}')\n",
    "\n",
    "print('loading data...')\n",
    "prefix = \"../../data/\"\n",
    "\n",
    "edge_indexs, feat_data, labels = load_data(args['dataset'], args['layers_tree'], prefix)\n",
    "\n",
    "np.random.seed(args['seed'])\n",
    "rd.seed(args['seed'])\n",
    "\n",
    "if args['dataset'] == 'yelp':\n",
    "    index = list(range(len(labels)))\n",
    "    idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels, stratify=labels,\n",
    "                                                                    test_size=args['test_size'], random_state=2,\n",
    "                                                                    shuffle=True)\n",
    "    idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,\n",
    "                                                          stratify=y_train_val, test_size=args['val_size'],\n",
    "                                                          random_state=2, shuffle=True)\n",
    "    dist_path = os.path.join(prefix, \"YelpChi_shortest_distance.pkl\")\n",
    "elif args['dataset'] == 'amazon':\n",
    "    index = list(range(3305, len(labels)))\n",
    "    idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels[3305:],\n",
    "                                                                    stratify=labels[3305:],\n",
    "                                                                    test_size=args['test_size'],\n",
    "                                                                    random_state=2, shuffle=True)\n",
    "    idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,\n",
    "                                                          stratify=y_train_val, test_size=args['val_size'],\n",
    "                                                          random_state=2, shuffle=True)\n",
    "    dist_path = os.path.join(prefix, \"Amazon_shortest_distance.pkl\")\n",
    "\n",
    "with open(dist_path, 'rb') as f:\n",
    "    dist_data = pickle.load(f)\n",
    "    dist_matrix = torch.tensor(dist_data['dist_matrix']).to(device)\n",
    "\n",
    "# 准备特征数据\n",
    "feat_data = torch.tensor(feat_data).float()\n",
    "# 特征归一化\n",
    "scaler = MinMaxScaler()\n",
    "feat_data = torch.tensor(scaler.fit_transform(feat_data)).float().to(device)\n",
    "\n",
    "# 初始化模型\n",
    "bsne_model = BSNE_Transformer(\n",
    "    in_feat=feat_data.shape[1],\n",
    "    out_feat=2,\n",
    "    relation_nums=len(edge_indexs),\n",
    "    d_model=64,\n",
    "    nhead=args['num_heads'],\n",
    "    num_layers=args['num_layers'],\n",
    "    dim_feedforward=256,\n",
    "    drop_rate=args['drop_rate']\n",
    ").to(device)\n",
    "\n",
    "# 将边索引转移到设备\n",
    "for edge_index in edge_indexs:\n",
    "    edge_index[0] = edge_index[0].to(device)\n",
    "    edge_index[1] = [tensor.to(device) for tensor in edge_index[1]]\n",
    "\n",
    "all_local_losses = []      # 每个epoch的batch本地损失列表\n",
    "all_global_losses = []     # 每个epoch的batch全局损失列表\n",
    "all_bsne_losses = []       # 每个epoch的总BSNE损失\n",
    "epoch_avg_local = []       # 每个epoch的平均本地损失\n",
    "epoch_avg_global = []      # 每个epoch的平均全局损失\n",
    "    \n",
    "    \n",
    "print(\"\\n=== Starting Pretraining ===\")\n",
    "\n",
    "bsne_model.classifier.requires_grad_(False)\n",
    "optimizer = torch.optim.AdamW(\n",
    "    filter(lambda p: p.requires_grad, bsne_model.parameters()),\n",
    "    lr=args['pretrain_lr'],\n",
    "    weight_decay=5e-5\n",
    ")\n",
    "pretrain_best_loss = float('inf')\n",
    "pretrain_no_improve = 0\n",
    "pretrain_early_stop = False\n",
    "\n",
    "\n",
    "# --- Pretraining hyperparameters ---\n",
    "loss_threshold = 0.05\n",
    "sample_size = 50\n",
    "max_steps_per_center = 200  # guard against an infinite inner loop\n",
    "max_epochs = 100\n",
    "center_indices = list(range(feat_data.shape[0]))\n",
    "# Randomly shuffle the order of center nodes before the epoch loop\n",
    "rd.shuffle(center_indices)\n",
    "\n",
    "# Limit the number of center nodes used for pretraining\n",
    "center_indices = center_indices[:max_epochs]\n",
    "\n",
    "\n",
    "# Each pretraining epoch optimizes embeddings around one sampled center node.\n",
    "for epoch, center_idx in enumerate(center_indices):\n",
    "    print(f\"\\n=== Pretraining Epoch {epoch} (Center Node: {center_idx}) ===\")\n",
    "    step = 0\n",
    "    \n",
    "    \n",
    "    # Build the Bp subgraph: neighbors sampled with probability proportional\n",
    "    # to 10**(-distance), so closer nodes are far more likely to be picked.\n",
    "    dist_row = dist_matrix[center_idx].cpu().numpy()\n",
    "    probs = np.power(10.0, -dist_row)\n",
    "    probs[center_idx] = 0\n",
    "    probs /= probs.sum()\n",
    "    available_nodes = len(dist_row) - 1\n",
    "    actual_sample_size = min(sample_size, available_nodes)\n",
    "    if actual_sample_size > 0:\n",
    "        neighbors = np.random.choice(len(dist_row), size=actual_sample_size, p=probs, replace=False)\n",
    "        bp_nodes = [center_idx] + neighbors.tolist()\n",
    "    else:\n",
    "        bp_nodes = [center_idx]\n",
    "\n",
    "    # Build the Bu subgraph: uniformly sampled background nodes.\n",
    "    # NOTE(review): this draw has no p= and does not exclude center_idx, so the\n",
    "    # center node can be sampled into its own background set -- confirm intended.\n",
    "    if actual_sample_size > 0:\n",
    "        neighbors = np.random.choice(len(dist_row), size=actual_sample_size, replace=False)\n",
    "        bu_nodes = [center_idx] + neighbors.tolist()\n",
    "    else:\n",
    "        bu_nodes = [center_idx]\n",
    "    \n",
    "    # Inner optimization loop for this center node.\n",
    "    while True:\n",
    "        bsne_model.train()\n",
    "        optimizer.zero_grad()\n",
    "        \n",
    "        total_loss = 0.0  # reset at every step\n",
    "        eps = 1e-10\n",
    "        \n",
    "        # Compute features for every node in the Bp subgraph\n",
    "        bp_node_features = []\n",
    "        for node_idx in bp_nodes:\n",
    "            node_subgraph = create_node_subgraph(node_idx, feat_data, edge_indexs, device)\n",
    "            _, node_feature = bsne_model([node_subgraph])\n",
    "            bp_node_features.append(node_feature.squeeze(0))\n",
    "        bp_features = torch.stack(bp_node_features)\n",
    "        \n",
    "        # Compute features for every node in the Bu subgraph\n",
    "        bu_node_features = []\n",
    "        for node_idx in bu_nodes:\n",
    "            node_subgraph = create_node_subgraph(node_idx, feat_data, edge_indexs, device)\n",
    "            _, node_feature = bsne_model([node_subgraph])\n",
    "            bu_node_features.append(node_feature.squeeze(0))\n",
    "        bu_features = torch.stack(bu_node_features)\n",
    "        \n",
    "        # Compute the loss\n",
    "        center_feature = bp_features[0]  # the center node is first\n",
    "\n",
    "        # Only the other Bp nodes (center node excluded)\n",
    "        other_bp_indices = bp_nodes[1:]\n",
    "        other_bp_features = bp_features[1:]\n",
    "        \n",
    "        with torch.no_grad():\n",
    "            # Original (path) distances from the center to the other Bp nodes\n",
    "            orig_dists_bp = dist_matrix[center_idx][other_bp_indices].cpu().numpy()\n",
    "\n",
    "            # Target distribution P (based on node path distances)\n",
    "            P = np.exp(-orig_dists_bp)\n",
    "            P_sum = P.sum()\n",
    "            P = P / (P_sum + eps)\n",
    "            P = torch.tensor(P, device=device)\n",
    "\n",
    "        # Model distribution Q (based on feature-space distances)\n",
    "        feat_dists_bp = torch.cdist(center_feature.unsqueeze(0), other_bp_features).squeeze(0)\n",
    "        # NOTE(review): feat_dists_bp is a non-leaf tensor already carrying grad\n",
    "        # through the model, so this call is effectively a no-op here.\n",
    "        feat_dists_bp.requires_grad_(True)\n",
    "        \n",
    "        Q = torch.softmax(-feat_dists_bp, dim=0)\n",
    "        \n",
    "#         log_ratio = torch.log(P + eps) - torch.log(Q + eps)\n",
    "        # NOTE(review): loss_local is the unweighted mean of log(P/Q), not the\n",
    "        # KL divergence sum(P * log(P/Q)); it can go negative -- confirm intended.\n",
    "        log_ratio = torch.log(((P + eps)/(Q + eps)))\n",
    "        loss_local = log_ratio.mean() \n",
    "\n",
    "        # Global distances and unnormalized probabilities over all nodes\n",
    "        all_dists = dist_matrix[center_idx].cpu().numpy()\n",
    "        unnorm_probs_global = np.exp(-all_dists)\n",
    "        unnorm_probs_global[center_idx] = 0  # exclude the center itself\n",
    "\n",
    "        # Global normalization constant\n",
    "        Z_global = unnorm_probs_global.sum() + eps\n",
    "\n",
    "        # Total global probability mass covered by the sampled Bp nodes\n",
    "        bp_global_prob_sum = unnorm_probs_global[other_bp_indices].sum() / Z_global\n",
    "\n",
    "        # k_Bp: correction factor for having sampled only len(other_bp_indices) of N nodes\n",
    "        N = dist_matrix.shape[0]\n",
    "        k_Bp = bp_global_prob_sum * (N / len(other_bp_indices))\n",
    "\n",
    "        bu_features_ = bu_features[1:]  # exclude the center node\n",
    "        feat_dists_bu = torch.cdist(center_feature.unsqueeze(0), bu_features_).squeeze(0)\n",
    "\n",
    "        sum_e_bu = torch.exp(-feat_dists_bu).sum()\n",
    "        sum_e_bp = torch.exp(-feat_dists_bp).sum()\n",
    "        global_ratio = k_Bp * (sum_e_bu / (sum_e_bp + eps))\n",
    "        \n",
    "        loss_global = torch.log(global_ratio.clamp(min=eps, max=1e10))\n",
    "\n",
    "        # = 5. Combine losses =\n",
    "#         total_loss += loss_local + loss_global\n",
    "        # NOTE(review): loss_global is computed and printed but currently\n",
    "        # excluded from the optimized objective (see commented line above).\n",
    "        total_loss += loss_local\n",
    "    \n",
    "        total_loss.backward()\n",
    "        optimizer.step()\n",
    "        \n",
    "        print(\"Q:\",Q)\n",
    "        print(f\"Step {step}: BSNE_Loss={total_loss.item():.4f},local_loss: {loss_local.item()},global_loss: {loss_global.item()}\")\n",
    "        step += 1\n",
    "        \n",
    "        # Stop once the loss is small enough or the step budget is exhausted\n",
    "        if total_loss.item() < loss_threshold or step >= max_steps_per_center:\n",
    "            print(f\"Center node {center_idx} finished at step {step} with loss {total_loss.item():.4f}\")\n",
    "            break\n",
    "\n",
    "print(\"\\n=== Starting Fine-tuning ===\")\n",
    "# Unfreeze the classifier head and rebuild the optimizer over all parameters\n",
    "bsne_model.classifier.requires_grad_(True)\n",
    "optimizer = torch.optim.AdamW(\n",
    "    bsne_model.parameters(),\n",
    "    lr=args['finetune_lr'],\n",
    "    weight_decay=5e-5\n",
    ")\n",
    "# Halve the learning rate when validation AUC plateaus\n",
    "scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(\n",
    "    optimizer, mode='max', factor=0.5, patience=10, verbose=True\n",
    ")\n",
    "\n",
    "best_val_auc = 0.0\n",
    "best_model_state = None\n",
    "train_pos, train_neg = pos_neg_split(idx_train, y_train)\n",
    "\n",
    "no_improve_epochs = 0\n",
    "early_stop = False\n",
    "\n",
    "for epoch in range(args['num_epochs']):\n",
    "    if early_stop:\n",
    "        break\n",
    "\n",
    "    bsne_model.train()\n",
    "    total_loss = 0.0\n",
    "\n",
    "    # Sample the center nodes for this batch\n",
    "    batch_centers = rd.sample(train_pos + train_neg, args['batch_size'])\n",
    "    subgraph_data = []\n",
    "\n",
    "    for xi in batch_centers:\n",
    "        subgraph = create_node_subgraph(xi, feat_data, edge_indexs, device)\n",
    "        #         print(len(subgraph['features']))\n",
    "        subgraph_data.append(subgraph)\n",
    "\n",
    "    optimizer.zero_grad()\n",
    "\n",
    "    center_logits, _ = bsne_model(subgraph_data)  # [B, 2]\n",
    "\n",
    "    # Labels of the sampled center nodes\n",
    "    labels_center = torch.tensor([labels[xi] for xi in batch_centers]).to(device).long()\n",
    "\n",
    "    # Classification loss. NOTE(review): nll_loss expects log-probabilities;\n",
    "    # confirm the model output ends with log_softmax.\n",
    "    cls_loss = F.nll_loss(center_logits, labels_center)\n",
    "\n",
    "    cls_loss.backward()\n",
    "    optimizer.step()\n",
    "\n",
    "    total_loss += cls_loss.item()\n",
    "\n",
    "    avg_loss = total_loss / args['batch_size']\n",
    "    writer.add_scalar('FineTune/Train_Loss', avg_loss, epoch)\n",
    "\n",
    "    # Validate every 5 epochs\n",
    "    if epoch % 5 == 0:\n",
    "        val_auc, val_ap, val_f1, val_g_mean = test(idx_val, y_val, bsne_model, feat_data, edge_indexs, device)\n",
    "\n",
    "        writer.add_scalar('Validation/AUC', val_auc, epoch)\n",
    "        writer.add_scalar('Validation/F1', val_f1, epoch)\n",
    "        writer.add_scalar('Validation/GMean', val_g_mean, epoch)\n",
    "\n",
    "        print(f'Epoch: {epoch:03d} | Loss: {avg_loss:.4f} | Val AUC: {val_auc:.4f} | Val F1: {val_f1:.4f}')\n",
    "\n",
    "        scheduler.step(val_auc)\n",
    "\n",
    "        # Keep a deep copy of the best-performing weights\n",
    "        if val_auc > best_val_auc:\n",
    "            best_val_auc = val_auc\n",
    "            no_improve_epochs = 0\n",
    "            best_model_state = copy.deepcopy(bsne_model.state_dict())\n",
    "        else:\n",
    "            no_improve_epochs += 1\n",
    "\n",
    "        if no_improve_epochs >= args['patience']:\n",
    "            print(f\"Early stopping at epoch {epoch}\")\n",
    "            early_stop = True\n",
    "\n",
    "# Load the best model\n",
    "# NOTE(review): best_model_state stays None if validation never improved;\n",
    "# load_state_dict would then raise -- verify at least one validation pass runs.\n",
    "bsne_model.load_state_dict(best_model_state)\n",
    "\n",
    "# Final test\n",
    "test_auc, test_ap, test_f1, test_g_mean = test(idx_test, y_test, bsne_model, feat_data, edge_indexs, device)\n",
    "print(f'\\n=== Final Test Results ===')\n",
    "print(f'Test AUC: {test_auc:.4f} | Test AP: {test_ap:.4f} | Test F1: {test_f1:.4f} | G-mean: {test_g_mean:.4f}')\n",
    "writer.close()\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "ba46f6b3",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([0.0277, 0.0102, 0.0277, 0.0102, 0.0277, 0.0277, 0.0277, 0.0102, 0.0102,\n",
       "        0.0277, 0.0102, 0.0277, 0.0277, 0.0277, 0.0277, 0.0102, 0.0102, 0.0277,\n",
       "        0.0102, 0.0102, 0.0277, 0.0277, 0.0277, 0.0102, 0.0277, 0.0277, 0.0277,\n",
       "        0.0102, 0.0102, 0.0277, 0.0102, 0.0277, 0.0102, 0.0277, 0.0277, 0.0102,\n",
       "        0.0102, 0.0277, 0.0277, 0.0277, 0.0102, 0.0102, 0.0102, 0.0277, 0.0102,\n",
       "        0.0277, 0.0277, 0.0102, 0.0102, 0.0277], device='cuda:0',\n",
       "       dtype=torch.float64)"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Debug cell: inspect the target distribution P left in kernel state by the\n",
    "# pretraining loop above (breaks under Restart and Run All)\n",
    "P"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "444ef46a",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([0.0098, 0.0092, 0.0085, 0.0055, 0.0127, 0.0428, 0.0141, 0.0092, 0.0114,\n",
       "        0.0138, 0.0651, 0.0259, 0.0118, 0.0193, 0.0250, 0.0063, 0.0073, 0.0330,\n",
       "        0.0246, 0.0196, 0.0216, 0.0920, 0.0183, 0.0054, 0.0084, 0.0567, 0.0025,\n",
       "        0.0092, 0.0265, 0.0114, 0.0300, 0.0041, 0.0035, 0.0370, 0.0167, 0.0051,\n",
       "        0.0073, 0.0083, 0.0086, 0.0060, 0.0025, 0.0037, 0.0218, 0.0050, 0.0079,\n",
       "        0.0301, 0.1105, 0.0348, 0.0072, 0.0229], device='cuda:0',\n",
       "       grad_fn=<SoftmaxBackward0>)"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Debug cell: inspect the model distribution Q left in kernel state by the\n",
    "# pretraining loop above (breaks under Restart and Run All)\n",
    "Q"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "bf489673",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([ 1.0434,  0.0972,  1.1764,  0.6240,  0.7829, -0.4354,  0.6788,  0.1044,\n",
       "        -0.1120,  0.6993, -1.8536,  0.0659,  0.8531,  0.3638,  0.1040,  0.4736,\n",
       "         0.3323, -0.1751, -0.8824, -0.6555,  0.2477, -1.2005,  0.4128,  0.6293,\n",
       "         1.1946, -0.7164,  2.4154,  0.1056, -0.9563,  0.8881, -1.0786,  1.9039,\n",
       "         1.0603, -0.2891,  0.5088,  0.6916,  0.3331,  1.2028,  1.1713,  1.5352,\n",
       "         1.3907,  1.0014, -0.7591,  1.7113,  0.2611, -0.0845, -1.3831, -1.2290,\n",
       "         0.3484,  0.1902], device='cuda:0', dtype=torch.float64,\n",
       "       grad_fn=<LogBackward0>)"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Debug cell: elementwise log(P/Q) using leftover kernel state\n",
    "torch.log(((P + eps)/(Q + eps)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "7b9d1c55",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor(0.2958, device='cuda:0', dtype=torch.float64, grad_fn=<MeanBackward0>)"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Debug cell: mean of log(P/Q) -- the value used as loss_local above\n",
    "torch.log(((P + eps)/(Q + eps))).mean()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "79751079",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([1.0887e+00, 9.4565e-03, 1.3840e+00, 3.8941e-01, 6.1288e-01, 1.8958e-01,\n",
       "        4.6073e-01, 1.0903e-02, 1.2549e-02, 4.8904e-01, 3.4359e+00, 4.3410e-03,\n",
       "        7.2772e-01, 1.3234e-01, 1.0809e-02, 2.2428e-01, 1.1040e-01, 3.0673e-02,\n",
       "        7.7862e-01, 4.2965e-01, 6.1333e-02, 1.4413e+00, 1.7040e-01, 3.9601e-01,\n",
       "        1.4272e+00, 5.1316e-01, 5.8340e+00, 1.1143e-02, 9.1459e-01, 7.8867e-01,\n",
       "        1.1633e+00, 3.6248e+00, 1.1243e+00, 8.3559e-02, 2.5887e-01, 4.7829e-01,\n",
       "        1.1093e-01, 1.4467e+00, 1.3718e+00, 2.3568e+00, 1.9340e+00, 1.0028e+00,\n",
       "        5.7625e-01, 2.9286e+00, 6.8173e-02, 7.1397e-03, 1.9129e+00, 1.5105e+00,\n",
       "        1.2135e-01, 3.6171e-02], device='cuda:0', dtype=torch.float64,\n",
       "       grad_fn=<PowBackward0>)"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Debug cell: squared log-ratio, elementwise (alternative loss candidate)\n",
    "torch.log(((P + eps)/(Q + eps)))**2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "ead0a27f",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor(0.8841, device='cuda:0', dtype=torch.float64, grad_fn=<MeanBackward0>)"
      ]
     },
     "execution_count": 16,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Debug cell: mean squared log-ratio (alternative loss candidate)\n",
    "(torch.log(((P + eps)/(Q + eps)))**2).mean()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "55d05f5b",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "tsne01",
   "language": "python",
   "name": "tsne01"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
