#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2022/4/24 上午9:45
# @Author  : cherry_wb
# @Site    : 
# @File    : tnet1.py
# @Software: PyCharm
from __future__ import print_function

import argparse
import copy
import os

import dgl.function as fn
import torch
import torch as th
import torch.utils.data
from commonCls import TrainingSummaryWriter
from dataloader import DGLDataLoader
from dgl.nn.pytorch import GatedGraphConv as DGLGatedGraphConv
from dgl.nn.pytorch.glob import SumPooling, AvgPooling, MaxPooling
from dgl.nn.pytorch.glob import softmax_nodes, sum_nodes
from dgl.utils import expand_as_pair
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.svm import SVC as SVM
from torch import nn, optim
from torch.nn import functional as F
from torch.nn import init
from torch.nn.parameter import Parameter
from utils.data_utils import report, report_to_str
from utils.tsne import plot_embedding


class BiRNN(nn.Module):
    """Recurrent sequence encoder (LSTM / GRU / plain RNN) with a linear head.

    ``forward`` returns both the per-timestep outputs and a fixed-size
    ``out_dim`` vector computed from the final hidden states.
    """

    def __init__(self, input_size, hidden_size, out_dim=2, rnn_type='lstm', num_layers=2, bidirectional=True):
        super(BiRNN, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.rnn_type = rnn_type
        # BUG FIX: the initial-state shapes used to hard-code the "* 2"
        # bidirectional factor, so bidirectional=False crashed in forward().
        self.num_directions = 2 if bidirectional else 1
        if self.rnn_type == 'lstm':
            self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True,
                                bidirectional=bidirectional)
        elif self.rnn_type == 'gru':
            self.gru = nn.GRU(input_size, hidden_size, num_layers, batch_first=True,
                              bidirectional=bidirectional)
        elif self.rnn_type == 'rnn':
            self.rnn = nn.RNN(
                input_size=input_size, hidden_size=hidden_size, num_layers=num_layers,
                batch_first=True, bidirectional=bidirectional)
        else:
            # BUG FIX: fixed "unknow" typo and narrowed the over-broad
            # Exception to ValueError (still caught by `except Exception`).
            raise ValueError("unknown rnn_type: {}".format(rnn_type))
        # NOTE(review): the head always expects two concatenated hidden
        # states (hidden_size * 2), even when bidirectional=False (where the
        # two *layers'* final states are concatenated instead) — confirm.
        self.fc = nn.Linear(hidden_size * 2, out_dim)

    def forward(self, x):
        """Encode ``x`` of shape (batch, seq, input_size).

        Returns ``(out_pack, out)``: the per-step RNN outputs and the
        (batch, out_dim) projection of the final hidden states.
        """
        if self.rnn_type == 'lstm':
            state_shape = (self.num_layers * self.num_directions, x.size(0), self.hidden_size)
            h0 = torch.zeros(*state_shape).to(x.device)
            c0 = torch.zeros(*state_shape).to(x.device)
            out_pack, hidden = self.lstm(x, (h0, c0))
            # NOTE(review): hidden[1] is the LSTM *cell* state, so this mixes
            # the last-layer hidden state with the last-layer cell state.
            # Kept as-is to preserve trained-checkpoint behavior — confirm
            # whether hidden[0][-2] (backward hidden) was intended.
            out = torch.cat([hidden[0][-1], hidden[1][-1]], dim=1)
            return out_pack, self.fc(out)
        elif self.rnn_type == 'gru':
            h0 = torch.zeros(self.num_layers * self.num_directions, x.size(0), self.hidden_size).to(x.device)
            out_pack, hidden = self.gru(x, h0)
            # Concatenate the two final state slices (fwd/bwd directions when
            # bidirectional, otherwise the last two layers).
            out = torch.cat([hidden[-1], hidden[-2]], dim=1)
            return out_pack, self.fc(out)
        elif self.rnn_type == 'rnn':
            out_pack, hidden = self.rnn(x, None)
            out = torch.cat([hidden[-1], hidden[-2]], dim=1)
            return out_pack, self.fc(out)
        else:
            raise ValueError("unknown rnn_type: {}".format(self.rnn_type))


class AttenNetBlock(nn.Module):
    """Multi-head feature attention.

    A learned gate (linear projection scaled by ``attn_para``, softmaxed and
    dropped out) re-weights a linear projection of the L2-normalized input;
    the per-head results are summed back to ``in_feats`` width.
    """

    def __init__(self, in_feats, num_heads=1, attn_drop=0.5, softmax_dim=-1):
        super(AttenNetBlock, self).__init__()
        self._in_feats = in_feats
        self._num_heads = num_heads
        self.softmax_dim = softmax_dim
        self.atten_out = nn.Linear(in_feats, in_feats * num_heads)
        self.fc_out = nn.Linear(in_feats, in_feats * num_heads)
        self.attn_para = nn.Parameter(torch.FloatTensor(size=(1, num_heads, in_feats)))
        self.attn_drop = nn.Dropout(attn_drop)
        self.reset_parameters()

    def reset_parameters(self):
        """Xavier-initialize both projections and the gate parameter."""
        gain = init.calculate_gain('relu')
        for proj in (self.atten_out, self.fc_out):
            init.xavier_normal_(proj.weight, gain=gain)
            init.zeros_(proj.bias)
        init.xavier_normal_(self.attn_para, gain=gain)

    def forward(self, feat):
        """Return the head-summed attention output of shape (N, in_feats)."""
        head_shape = (-1, self._num_heads, self._in_feats)
        scores = self.atten_out(feat).view(*head_shape) * self.attn_para
        weights = self.attn_drop(torch.softmax(scores, self.softmax_dim))
        projected = self.fc_out(F.normalize(feat)).view(*head_shape)
        return (projected * weights).sum(dim=-2)


class AttenNet(nn.Module):
    """MLP encoder followed by a stack of (shared) attention blocks and a
    small two-layer classification head.

    The same ``AttenNetBlock`` instance is applied ``num_layers`` times, each
    application followed by its own LayerNorm and a GELU.
    """

    def __init__(self, in_feats, hidden_dim, out_dim=2, num_heads=7, num_layers=3, dropout_p=0.5):
        super(AttenNet, self).__init__()
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.out_feats = in_feats
        self.dropout_p = dropout_p
        self.drop = nn.Dropout(dropout_p)
        # Project input features into the hidden space.
        self.to_hidden = nn.Sequential(
            nn.Linear(in_features=in_feats, out_features=hidden_dim),
            nn.BatchNorm1d(num_features=hidden_dim),
            nn.ReLU(True),
        )
        # One attention block shared across all layers, one norm per layer.
        self.atten = AttenNetBlock(hidden_dim, self.num_heads, dropout_p)
        self.norms = nn.ModuleList(
            [nn.LayerNorm(hidden_dim) for _ in range(num_layers)]
        )
        self.out_layer = nn.Sequential(
            nn.Linear(in_features=hidden_dim, out_features=int(hidden_dim / 2)),
            nn.ReLU(True),
            nn.Dropout(p=self.dropout_p),
            nn.Linear(in_features=int(hidden_dim / 2), out_features=out_dim),
        )

        self.reset_parameters()

    def reset_parameters(self) -> None:
        """Sub-modules initialize themselves; nothing extra to do here."""
        pass

    def forward(self, x):
        """Return (batch, out_dim) logits for ``x`` of shape (batch, in_feats)."""
        # FIX: removed the dead `x_in = x / self.num_layers` local and the
        # `out = 0; out += ...` accumulator left over from a residual variant;
        # behavior is unchanged.
        x = self.to_hidden(x)
        for layer in range(self.num_layers):
            x = self.atten(x)
            x = self.norms[layer](x)
            x = F.gelu(x)
        return self.out_layer(x)


class ClassifierNet(nn.Module):
    """Three-Linear MLP classification head with BatchNorm, ReLU and dropout.

    With ``output_dim == 1`` the flattened logits are passed through a
    sigmoid; otherwise the (batch, output_dim) logits are returned (squeezed
    at dim 1).
    """

    def __init__(self, input_dim, hidden_dim, output_dim, dropout_p=0.5):
        super(ClassifierNet, self).__init__()
        self.dropout_p = dropout_p
        half_hidden = int(hidden_dim / 2)
        self.classifier = nn.Sequential(
            nn.Linear(in_features=input_dim, out_features=hidden_dim),
            nn.BatchNorm1d(num_features=hidden_dim),
            nn.ReLU(True),
            nn.Dropout(p=self.dropout_p),
            nn.Linear(in_features=hidden_dim, out_features=half_hidden),
            nn.ReLU(True),
            nn.Dropout(p=self.dropout_p),
            nn.Linear(in_features=half_hidden, out_features=output_dim),
        )
        self.output_dim = output_dim

    def forward(self, features, labels=None):
        """Classify ``features``; ``labels`` is accepted but unused."""
        try:
            logits = self.classifier(features)
            if self.output_dim == 1:
                return torch.sigmoid(torch.flatten(logits))
            return logits.squeeze(1)
        except Exception as err:
            # Surface the offending input shape before re-raising.
            print(features.shape, err)
            raise err


class LinearDrop(nn.Module):
    """A single Linear layer followed by dropout."""

    def __init__(self, input_dim, output_dim, dropout_p=0.5):
        super(LinearDrop, self).__init__()
        self.dropout_p = dropout_p
        self.linear = nn.Linear(in_features=input_dim, out_features=output_dim)
        self.drop = nn.Dropout(p=self.dropout_p)
        self.output_dim = output_dim

    def reset_parameters(self):
        """Xavier-initialize the weight, zero the bias."""
        gain = nn.init.calculate_gain('relu')
        nn.init.xavier_normal_(self.linear.weight, gain=gain)
        nn.init.zeros_(self.linear.bias)

    def forward(self, features):
        """Return dropout(linear(features))."""
        projected = self.linear(features)
        return self.drop(projected)


class ApplyNodeFunc(nn.Module):
    """Node-update wrapper: MLP followed by LayerNorm and ReLU.

    ``mlp`` must expose an ``output_dim`` attribute so the norm can be sized.
    """

    def __init__(self, mlp):
        super(ApplyNodeFunc, self).__init__()
        self.mlp = mlp
        # Normalize over the MLP's output width.
        self.bn = nn.LayerNorm(self.mlp.output_dim)

    def forward(self, h):
        """Return relu(layernorm(mlp(h)))."""
        return F.relu(self.bn(self.mlp(h)))


class DGLMLP(nn.Module):
    """MLP used inside the GIN layers.

    With ``num_layers == 1`` this is a single Linear; otherwise it is a stack
    of Linear -> LayerNorm -> ReLU blocks with a final plain Linear.
    """

    def __init__(self, num_layers, input_dim, hidden_dim, output_dim):
        super(DGLMLP, self).__init__()
        self.linear_or_not = True  # single-Linear fast path flag
        self.num_layers = num_layers
        self.output_dim = output_dim

        if num_layers < 1:
            raise ValueError("number of layers should be positive!")
        if num_layers == 1:
            # Degenerate case: a plain linear map.
            self.linear = nn.Linear(input_dim, output_dim)
        else:
            self.linear_or_not = False
            self.linears = torch.nn.ModuleList()
            self.batch_norms = torch.nn.ModuleList()

            # input -> hidden -> ... -> hidden -> output
            dims = [input_dim] + [hidden_dim] * (num_layers - 1) + [output_dim]
            for d_in, d_out in zip(dims[:-1], dims[1:]):
                self.linears.append(nn.Linear(d_in, d_out))
            # One norm per hidden activation (the final Linear has none).
            for _ in range(num_layers - 1):
                self.batch_norms.append(nn.LayerNorm(hidden_dim))

    def forward(self, x):
        """Apply the MLP (or the single Linear) to ``x``."""
        if self.linear_or_not:
            return self.linear(x)
        h = x
        for linear, norm in zip(self.linears[:-1], self.batch_norms):
            h = F.relu(norm(linear(h)))
        return self.linears[-1](h)


class MultiHeadAttentionPooling(nn.Module):
    """Attention-based graph readout with ``nhead`` independent gates.

    Each cloned ``gate_nn`` scores every node; the scores are softmaxed per
    graph, used to gate the (head-replicated) node features, summed per graph
    and finally summed across heads to a (num_graphs, feat_dim) readout.
    """

    def __init__(self, gate_nn, nhead=1, feat_nn=None):
        super(MultiHeadAttentionPooling, self).__init__()
        self.gate_nns = self._get_clones(gate_nn, nhead)
        self.feat_nn = feat_nn
        self.nhead = nhead
        self.reset_parameters()

    def reset_parameters(self):
        """Re-initialize every gate-network clone."""
        gain = nn.init.calculate_gain('relu')
        for gate in self.gate_nns:
            if isinstance(gate, nn.Linear):
                nn.init.xavier_normal_(gate.weight, gain=gain)
                nn.init.zeros_(gate.bias)
            if isinstance(gate, LinearDrop):
                gate.reset_parameters()

    def _get_clones(self, module, N):
        """Deep-copy ``module`` N times into a ModuleList."""
        return nn.ModuleList(copy.deepcopy(module) for _ in range(N))

    def forward(self, graph, feat):
        """Return a (num_graphs, feat_dim) readout for the batched ``graph``."""
        with graph.local_scope():
            if self.feat_nn:
                feat = self.feat_nn(feat)
            # Per-head gate scores and head-replicated features, concatenated
            # along the feature axis.
            scores = torch.cat([gate(feat) for gate in self.gate_nns], dim=1)
            stacked = torch.cat([feat] * self.nhead, dim=1)

            graph.ndata['gate'] = scores
            scores = softmax_nodes(graph, 'gate')
            graph.ndata.pop('gate')

            graph.ndata['r'] = stacked * scores
            readout = sum_nodes(graph, 'r')
            graph.ndata.pop('r')

            # Collapse the head dimension by summation.
            return readout.view(-1, self.nhead, feat.shape[1]).sum(dim=1)


class MultiHeadAttentionBlock(nn.Module):
    """Multi-head feature attention (same scheme as ``AttenNetBlock``).

    A learned gate (linear projection scaled by ``attn_para``, softmaxed over
    ``softmax_dim`` and dropped out) re-weights a linear projection of the
    L2-normalized input; the per-head results are summed.
    """

    def __init__(self, in_feats,  attn_drop=0.5, softmax_dim=-1, num_heads=2):
        super(MultiHeadAttentionBlock, self).__init__()
        self._in_feats = in_feats
        self._num_heads = num_heads
        self.softmax_dim = softmax_dim
        self.atten_out = nn.Linear(in_feats, in_feats * num_heads)
        self.fc_out = nn.Linear(in_feats, in_feats * num_heads)
        self.attn_para = nn.Parameter(torch.FloatTensor(size=(1, num_heads, in_feats)))
        self.attn_drop = nn.Dropout(attn_drop)
        self.reset_parameters()

    def reset_parameters(self):
        """Xavier-initialize both projections and the gate parameter."""
        gain = init.calculate_gain('relu')
        for proj in (self.atten_out, self.fc_out):
            init.xavier_normal_(proj.weight, gain=gain)
            init.zeros_(proj.bias)
        init.xavier_normal_(self.attn_para, gain=gain)

    def forward(self, feat):
        """Return the head-summed attention output of shape (N, in_feats)."""
        head_shape = (-1, self._num_heads, self._in_feats)
        scores = self.atten_out(feat).view(*head_shape) * self.attn_para
        weights = self.attn_drop(torch.softmax(scores, self.softmax_dim))
        projected = self.fc_out(F.normalize(feat)).view(*head_shape)
        return (projected * weights).sum(dim=-2)


class GINAttentionConv(nn.Module):
    """GIN-style graph convolution augmented with per-edge-type attention.

    The classic GIN aggregation ``(1 + eps) * h_dst + reduce(h_src)`` is
    combined, by addition, with an attention feature built from
    edge-type-specific ``MultiHeadAttentionBlock``s over source-node features.

    NOTE(review): ``self.gru`` is constructed and reset but never called in
    ``forward`` (the ``feat_gru`` term is commented out) — confirm whether it
    can be removed.
    """

    def __init__(self,
                 apply_func,
                 out_feats,
                 n_etypes,
                 attn_drop=0.3,
                 aggregator_type='sum',
                 attn_heads=4,
                 init_eps=0,
                 learn_eps=False
                 ):
        super(GINAttentionConv, self).__init__()
        self.apply_func = apply_func
        self._aggregator_type = aggregator_type
        self._n_etypes = n_etypes
        # One attention block per edge type, applied to source-node features.
        self.attens_src = nn.ModuleList(
            [MultiHeadAttentionBlock(out_feats, attn_drop,num_heads=attn_heads) for _ in range(n_etypes)]
        )
        # NOTE(review): softmax_dim=0 here normalizes over the node dimension
        # rather than the feature dimension — confirm this is intended.
        self.out_atten = MultiHeadAttentionBlock(out_feats, attn_drop, 0, num_heads=attn_heads)
        self.gru = nn.GRUCell(out_feats, out_feats, bias=True)
        # Map the aggregator name to the matching DGL built-in reducer.
        if aggregator_type == 'sum':
            self._reducer = fn.sum
        elif aggregator_type == 'max':
            self._reducer = fn.max
        elif aggregator_type == 'mean':
            self._reducer = fn.mean
        else:
            raise KeyError('Aggregator type {} not recognized.'.format(aggregator_type))
        # eps weights the destination node's own features: a learnable
        # Parameter when learn_eps, otherwise a fixed buffer.
        if learn_eps:
            self.eps = th.nn.Parameter(th.FloatTensor([init_eps]))
        else:
            self.register_buffer('eps', th.FloatTensor([init_eps]))

        self.reset_parameters()

    def reset_parameters(self):
        """Re-initialize the GRU cell (attention blocks reset themselves)."""
        gain = init.calculate_gain('relu')  # NOTE(review): computed but unused
        self.gru.reset_parameters()

    def edge_func(self, i):
        """Return a DGL edge UDF that applies the i-th edge-type attention
        block to the source-node features of the given edges."""
        def _edge_func(edges):
            src_atten = self.attens_src[i](edges.src['h'])
            return {'W_e*h': src_atten}

        return _edge_func

    def forward(self, graph, feat, etypes, edge_weight=None):
        """Run one convolution step.

        Parameters
        ----------
        graph : DGLGraph
        feat : node features, or a (src, dst) pair for bipartite graphs
        etypes : per-edge integer type ids aligned with ``graph.edges()``
        edge_weight : optional per-edge scalar weights

        Returns the updated node representations.
        """
        with graph.local_scope():
            # NOTE(review): fn.copy_src is deprecated in newer DGL releases
            # in favor of fn.copy_u — confirm the pinned DGL version.
            aggregate_fn = fn.copy_src('h', 'm')
            if edge_weight is not None:
                assert edge_weight.shape[0] == graph.number_of_edges()
                graph.edata['_edge_weight'] = edge_weight
                aggregate_fn = fn.u_mul_e('h', '_edge_weight', 'm')

            feat_src, feat_dst = expand_as_pair(feat, graph)
            graph.srcdata['h'] = feat_src
            # Classic GIN aggregation: (1 + eps) * h_v + reduce(neighbors).
            graph.update_all(aggregate_fn, self._reducer('m', 'neigh'))
            rst = (1 + self.eps) * feat_dst + graph.dstdata['neigh']

            # Attention term: per-edge-type transform of source features,
            # then summed into each destination node.
            for i in range(self._n_etypes):
                eids = th.nonzero(etypes == i, as_tuple=False).view(-1).type(graph.idtype)
                if len(eids) > 0:
                    graph.apply_edges(
                        self.edge_func(i),
                        eids
                    )
            graph.update_all(fn.copy_e('W_e*h', 'm'), fn.sum('m', 'a'))
            a = graph.ndata.pop('a')  # (N, D)
            atten_feat = self.out_atten(a)
            if self.apply_func is not None:
                rst = self.apply_func(rst)
            rst = rst + atten_feat #+ feat_gru
            return rst


class GTCNet(nn.Module):
    """Composite model with a sequence branch and a graph branch.

    Two branches share the raw node features ``h``:

    * sequence branch — ``h`` is projected by ``w_asm``, reshaped to
      (batch, seq_len, birnn_in_dim), encoded by a BiLSTM and classified
      into two-way logits (``tout``);
    * graph branch — one GatedGraphConv pass followed by ``num_layers - 1``
      GIN-attention layers; each representation is pooled per graph and
      mapped to two-way logits.

    All per-branch logit blocks are concatenated and fused by an attention
    block plus a final linear layer.
    """

    def __init__(self, num_layers, num_hop, num_mlp_layers, input_dim, hidden_dim,
                 output_dim, n_etypes=2, attn_drop=0.3, final_dropout=0.5, learn_eps=True,
                 graph_pooling_type='atten', neighbor_pooling_type='sum'):
        # NOTE(review): `neighbor_pooling_type` is currently unused; kept for
        # interface compatibility.
        super(GTCNet, self).__init__()
        self.num_layers = num_layers
        self.hidden_dim = hidden_dim
        self.input_dim = input_dim
        self.learn_eps = learn_eps

        # Sequence-branch hyper-parameters (fixed for this dataset).
        self.asm_size = 256
        self.birnn_in_dim = 100
        self.birnn_hidden_size = 256
        self.birnn_out_dim = 256
        self.seq_len = 1500

        # Projects raw asm features (asm_size) down to the BiRNN input width.
        self.w_asm = Parameter(torch.Tensor(self.asm_size, int(self.birnn_in_dim)))

        self.birnn = BiRNN(self.birnn_in_dim, self.birnn_hidden_size, self.birnn_out_dim,
                           rnn_type='lstm', num_layers=2)
        self.classifier = ClassifierNet(self.birnn_out_dim, int(self.birnn_out_dim / 2), 2, final_dropout)

        # Optional projection when node features are wider than the hidden size.
        if self.input_dim > self.hidden_dim:
            self.embed_para = Parameter(torch.Tensor(self.input_dim, self.hidden_dim))
        else:
            self.embed_para = None

        self.ginlayers = torch.nn.ModuleList()
        self.batch_norms = torch.nn.ModuleList()
        self.ggnn = DGLGatedGraphConv(in_feats=input_dim,
                                      out_feats=input_dim,
                                      n_steps=num_hop,
                                      n_etypes=n_etypes)

        for layer in range(self.num_layers - 1):
            if layer == 0:
                mlp = DGLMLP(num_mlp_layers, input_dim, hidden_dim, hidden_dim)
            else:
                mlp = DGLMLP(num_mlp_layers, hidden_dim, hidden_dim, hidden_dim)

            # BUG FIX: `learn_eps == self.learn_eps` (a bool) used to be the
            # 5th positional argument, i.e. it was consumed as
            # `aggregator_type` and made GINAttentionConv raise KeyError at
            # construction. Pass the flag by keyword instead.
            # NOTE(review): when input_dim != hidden_dim the first layer's
            # attention blocks are still sized with hidden_dim — confirm the
            # intended configuration.
            self.ginlayers.append(
                GINAttentionConv(ApplyNodeFunc(mlp), hidden_dim, n_etypes, attn_drop,
                                 learn_eps=self.learn_eps)
            )
            self.batch_norms.append(nn.LayerNorm(hidden_dim))

        # One two-way prediction head per representation (GGNN output first).
        self.linears_prediction = torch.nn.ModuleList()
        for layer in range(num_layers):
            if layer == 0:
                self.linears_prediction.append(
                    ClassifierNet(input_dim, int(input_dim / 2), output_dim, attn_drop))
            else:
                self.linears_prediction.append(
                    LinearDrop(hidden_dim, output_dim, final_dropout))

        self.drop = nn.Dropout(final_dropout)
        self.att_drop = nn.Dropout(attn_drop)

        # One graph readout per representation; only the feature width differs
        # between layer 0 (input_dim) and the rest (hidden_dim).
        self.pools = torch.nn.ModuleList()
        for layer in range(num_layers):
            in_dim = input_dim if layer == 0 else hidden_dim
            if graph_pooling_type == 'sum':
                pool = SumPooling()
            elif graph_pooling_type == 'mean':
                pool = AvgPooling()
            elif graph_pooling_type == 'max':
                pool = MaxPooling()
            else:
                pooling_gate_nn = LinearDrop(in_dim, in_dim, attn_drop)
                pool = MultiHeadAttentionPooling(pooling_gate_nn, 4)
            self.pools.append(pool)

        # Fuse the (num_layers + 1) two-way logit blocks: width = 2*num_layers + 2.
        self.num_heads = 4
        self.atten = AttenNetBlock(self.num_layers * 2 + 2, self.num_heads, attn_drop=attn_drop)
        self.out_layer = nn.Linear(self.num_layers * 2 + 2, 2)
        self.reset_parameters()

    def reset_parameters(self):
        """Initialize the raw projection matrices; sub-modules reset themselves."""
        nn.init.normal_(self.w_asm, std=0.05)
        if self.embed_para is not None:
            nn.init.normal_(self.embed_para, std=0.05)

    def forward(self, g, h):
        """Return (num_graphs, 2) logits for batched graph ``g`` with node
        features ``h``.

        Consumes (pops) ``g.edata['type']`` as the per-edge type ids.
        """
        # --- sequence branch ---
        embed_asm = torch.tensordot(h, self.w_asm, dims=1)
        embed_f = embed_asm.view(-1, self.seq_len, embed_asm.shape[-1])
        outputs_rnn, birnn_f = self.birnn(embed_f)
        tout = self.classifier(birnn_f)

        # --- graph branch ---
        etypes = g.edata.pop('type')
        if self.embed_para is not None:
            h = torch.tensordot(h, self.embed_para, dims=1)

        h1 = self.ggnn(g, h, etypes)

        # NOTE(review): the GIN stack starts from the raw features `h`, not
        # from the GGNN output `h1` (which is only pooled) — confirm intended.
        hidden_rep = [h1]
        for i in range(self.num_layers - 1):
            # BUG FIX: GINAttentionConv.forward requires the per-edge type
            # ids; the call previously omitted `etypes` and raised TypeError.
            h = self.ginlayers[i](g, h, etypes)
            h = self.batch_norms[i](h)
            h = F.relu(h)
            hidden_rep.append(h)

        # Per-representation pooling + two-way heads, prefixed by the
        # sequence-branch logits.
        xout = [tout]
        for i, h in enumerate(hidden_rep):
            pooled_h = self.pools[i](g, h)
            xout.append(self.linears_prediction[i](pooled_h))

        att_in = torch.cat(xout, dim=1)
        if att_in.shape[1] == 2:
            # Degenerate configuration: a single logit block, nothing to fuse.
            return att_in
        out = self.out_layer(self.atten(att_in))
        return out

if __name__ == "__main__":
    # Command-line configuration for training the composite model.
    parser = argparse.ArgumentParser(description='复合式神经网络模型')
    # NOTE(review): the help text says "default: 128" but the actual default is 16.
    parser.add_argument('--batch-size', type=int, default=16, metavar='B',
                        help='input batch size for training (default: 128)')
    parser.add_argument('--epochs', type=int, default=100, metavar='N',
                        help='number of epochs to train (default: 100)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    # NOTE(review): --rnn-type is parsed but GTCNet hard-codes its BiRNN
    # rnn_type='lstm' — confirm whether this flag should be wired through.
    parser.add_argument('--rnn-type', type=str, default='gru', metavar='R',
                        help='rnn type (default: gru)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    args = parser.parse_args()
    args.cuda = not args.no_cuda and torch.cuda.is_available()

    torch.manual_seed(args.seed)

    device = torch.device("cuda" if args.cuda else "cpu")

    # NOTE(review): kwargs is built but never passed to a data loader below.
    kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}


    def train(epoch,data_loader):
        """Run one training epoch over ``data_loader``.

        Uses the enclosing scope's globals (model, optimizer, criterion,
        args, model_svm, device, train_loader). After the epoch, fits the
        auxiliary SVM on the epoch's network logits and returns a metrics
        dict whose acc/pre/rec/f1 come from the SVM predictions (0-1 scale)
        plus the mean cross-entropy under 'loss'.
        """
        model.train()
        train_loss = 0
        total_correct = 0
        # X / y collect the network's output logits and the labels; they are
        # the training set for the downstream SVM.
        X = []
        y = []
        all_preds = []
        all_labels = []

        for i, (graphs, labels) in enumerate(data_loader):
            # Graphs without edge-type annotations get a single (zero) type.
            if 'type' not in graphs.edata:
                graphs.edata['type'] = torch.zeros(graphs.num_edges()).float()
            graphs = graphs.to(device)
            feat = graphs.ndata.pop('attr').squeeze(dim=1)
            optimizer.zero_grad()
            c_r = model(graphs, feat)

            _, predicted = torch.max(c_r.data, 1)
            all_preds += [predicted.cpu()]
            all_labels += [labels.cpu()]

            X.extend(c_r.cpu().tolist())
            y.extend(labels.cpu().tolist())

            loss_c = criterion(c_r, labels.long().to(device))
            loss = loss_c # loss + loss_c
            loss.backward()

            train_loss += loss.item()
            optimizer.step()

            total_correct += (predicted.cpu() == labels.data).sum().item()
            if i % args.log_interval == 0:
                # NOTE(review): the second bracketed value is the running
                # correct count, not a dataset size — confirm intended.
                print('Train Epoch: {} [{}-{}]\tLoss: {:.6f}'.format(
                    epoch, i * len(labels), total_correct,
                    loss.item()))
        # Fit the auxiliary SVM on this epoch's logits.
        model_svm.fit(X, y)

        predictions = model_svm.predict(X)
        pdata = {
            'accuracy': accuracy_score(y, predictions) * 100,
            'precision': precision_score(y, predictions) * 100,
            'recall': recall_score(y, predictions) * 100,
            'f1': f1_score(y, predictions) * 100,
        }
        print('====> SVM', pdata)
        metrics = report(torch.cat(all_labels, dim=0), torch.cat(all_preds, dim=0).cpu())
        print('====> Train: ',report_to_str(metrics, keys=True))
        # NOTE(review): reads the global train_loader rather than the
        # data_loader parameter — confirm they are always the same.
        print('====> Epoch: {} - {} - {}- Average loss: {:.4f}'.format(
            epoch, total_correct, len(train_loader.dataset),
                                  train_loss / len(train_loader)))  #
        # Overwrite the report() metrics with the SVM-based ones (0-1 scale).
        metrics['loss'] = train_loss / len(train_loader)
        metrics['acc'] = pdata['accuracy'] / 100
        metrics['pre'] = pdata['precision'] / 100
        metrics['rec'] = pdata['recall'] / 100
        metrics['f1'] = pdata['f1'] / 100
        return metrics


    def eval_net(epoch, data_loader):
        """Evaluate ``model`` on ``data_loader`` for one epoch.

        Uses the enclosing scope's globals (model, device, criterion,
        model_svm) and the module-level best_valid_loss / best_valid_acc to
        decide when to checkpoint. Returns a metrics dict whose
        acc/pre/rec/f1 come from the SVM fitted in train() (0-1 scale) plus
        the mean cross-entropy under 'loss'.
        """
        global best_valid_loss, best_valid_acc
        model.eval()
        test_loss = 0.0
        with torch.no_grad():
            total_correct = 0
            all_preds = []
            all_labels = []
            all_outputs = []
            # X / test_y feed the SVM that was fitted on training logits.
            X = []
            test_y = []
            for i, (graphs, labels) in enumerate(data_loader):
                # Graphs without edge-type annotations get a single (zero) type.
                if 'type' not in graphs.edata:
                    graphs.edata['type'] = torch.zeros(graphs.num_edges()).float()
                graphs = graphs.to(device)
                feat = graphs.ndata.pop('attr').squeeze(dim=1)
                c_r = model(graphs, feat)

                X.extend(c_r.cpu().tolist())
                test_y.extend(labels.cpu().tolist())
                _, predicted = torch.max(c_r.data, 1)

                all_outputs += [c_r.detach().cpu()]
                all_preds += [predicted.cpu()]
                all_labels += [labels.cpu()]
                total_correct += (predicted.cpu() == labels.data).sum().item()

                # BUG FIX: accumulate a python float via .item() instead of a
                # 0-dim tensor, so the "{:.4f}".format(test_loss) below, the
                # best-loss comparison and metrics['loss'] all hold plain
                # numbers.
                test_loss += criterion(c_r, labels.long().to(device)).item()

            # t-SNE plot of this epoch's output logits.
            plot_embedding(torch.cat(all_outputs).cpu().numpy(), torch.cat(all_labels).numpy())
            metrics = report(torch.cat(all_labels, dim=0), torch.cat(all_preds, dim=0).cpu())
            print('====> T/V: ', report_to_str(metrics, keys=True))
            test_loss /= len(data_loader)
            valid_acc = metrics['acc']
            # Checkpoint whenever either the loss or the accuracy improves.
            if test_loss < best_valid_loss or best_valid_acc < valid_acc:
                if test_loss < best_valid_loss:
                    best_valid_loss = test_loss
                if best_valid_acc < valid_acc:
                    best_valid_acc = valid_acc
                m_path = '/home/wb/work/paper/paper_code/checkpoint_model/gtcnet-{}-{:.4f}-{:.4f}.pt'.format(
                    epoch, valid_acc, test_loss)
                torch.save(model.state_dict(), m_path)

        predictions = model_svm.predict(X)
        pdata = {
            'accuracy': accuracy_score(test_y, predictions) * 100,
            'precision': precision_score(test_y, predictions) * 100,
            'recall': recall_score(test_y, predictions) * 100,
            'f1': f1_score(test_y, predictions) * 100,
        }
        print('====> T/V set loss: {:.4f} - {:.4f} - {:.4f}'.format(test_loss, total_correct,
                                                                     len(torch.cat(all_labels).numpy())))
        print('====> SVM', pdata)
        # Replace the report() metrics with the SVM-based ones (0-1 scale).
        metrics['loss'] = test_loss
        metrics['acc'] = pdata['accuracy'] / 100
        metrics['pre'] = pdata['precision'] / 100
        metrics['rec'] = pdata['recall'] / 100
        metrics['f1'] = pdata['f1'] / 100
        return metrics


    best_valid_loss = float('inf')
    best_valid_acc = float(0)
    # GTCNet(num_layers, num_hop, num_mlp_layers, input_dim, hidden_dim, output_dim)
    model = GTCNet(4, 1, 2, 256, 384, 2,
                   attn_drop=0.4,#0.3
                   final_dropout=0.4,
                   graph_pooling_type='atten'  #'atten'
                   ).to(device)

    # The SVM is refit each epoch on the network's training logits.
    model_svm = SVM()
    optimizer = optim.Adam(model.parameters(), lr=1e-3)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.6)
    criterion = nn.CrossEntropyLoss()
    # Resume from a checkpoint when one exists at this hard-coded path.
    saved_path = '/home/wb/work/paper/paper_code/checkpoint_model/gtcnet-1c39.pt'
    if os.path.exists(saved_path):
        print("Loaded checkpoint model {}".format(saved_path))
        model.load_state_dict(torch.load(saved_path))

    summary = TrainingSummaryWriter("runs/gtc-g-devign")
    from DevignDataSet import DevignDataSet as VulDataSet
    dataset = VulDataSet(name='devign', raw_dir='../dataset/', split=0, max_tolerate_len=5000)
    train_loader, valid_loader = DGLDataLoader(
        dataset, batch_size=16, device=device,
        seed=12345, shuffle=True,
        split_name='fold10', fold_idx=0).train_valid_loader()

    # One-off FLOPs/parameter profile on the first batch.
    # NOTE(review): this pops 'attr' from the first batch's graphs before
    # training starts — confirm the loader rebuilds batches each epoch.
    from thop import profile,clever_format
    for i, (graphs, labels) in enumerate(train_loader):
        if 'type' not in graphs.edata:
            graphs.edata['type'] = torch.zeros(graphs.num_edges()).float()
        graphs = graphs.to(device)
        feat = graphs.ndata.pop('attr').squeeze(dim=1)
        flops, params = profile(model, inputs=(graphs, feat,))
        # NOTE(review): 'macs' receives the formatted FLOPs value here.
        macs, params = clever_format([flops, params], "%.3f")
        print(macs)
        print(params)
        break

    print("data_len t={} v={} ".format(len(train_loader), len(valid_loader)))

    # Train/evaluate loop; metrics are logged to TensorBoard via `summary`
    # (non-loss metrics are rescaled to percentages).
    for epoch in range(1, args.epochs + 1):
        data_train = train(epoch,train_loader)
        scheduler.step()
        data_eval = eval_net(epoch, valid_loader)
        summary.step()
        for k, v in data_train.items():
            if k != "loss":
                summary.add_scalar('train_{}'.format(k), v * 100)
            else:
                summary.add_scalar('train_{}'.format(k), v)
        for k, v in data_eval.items():
            if k != "loss":
                summary.add_scalar('eval_{}'.format(k), v * 100)
            else:
                summary.add_scalar('eval_{}'.format(k), v)


    # Final validation pass (reuses `epoch` left over from the loop).
    print('====> Valid: ')
    eval_net(epoch, valid_loader)
