#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2022/4/24 上午9:45
# @Author  : cherry_wb
# @Site    : 
# @File    : tnet1.py
# @Software: PyCharm
from __future__ import print_function

import argparse
import os

import numpy as np
import torch
import torch.utils.data
from commonCls import TrainingSummaryWriter
from load_bin2graph import load_bin_devign
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.svm import SVC as SVM
from torch import nn, optim
from torch.nn.parameter import Parameter
from torch_geometric.loader import DataLoader
from utils.data_utils import report, report_to_str
from utils.tsne import plot_embedding


class BiRNN(nn.Module):
    """Recurrent sequence encoder (LSTM / GRU / vanilla RNN, optionally bidirectional).

    forward(x) takes a batch-first tensor of shape (batch, seq_len, input_size)
    and returns a tuple:
      * out_pack: per-step outputs, shape (batch, seq_len, hidden_size * num_directions)
      * out: projection of the final hidden state(s) to (batch, out_dim)

    Raises:
        ValueError: if ``rnn_type`` is not one of 'lstm', 'gru', 'rnn'.
    """

    def __init__(self, input_size, hidden_size, out_dim=2, rnn_type='lstm', num_layers=2, bidirectional=True):
        super(BiRNN, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.rnn_type = rnn_type
        self.bidirectional = bidirectional
        # One direction contributes hidden_size features; two directions double it.
        self.num_directions = 2 if bidirectional else 1
        if self.rnn_type == 'lstm':
            self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True,
                                bidirectional=bidirectional)
        elif self.rnn_type == 'gru':
            self.gru = nn.GRU(input_size, hidden_size, num_layers, batch_first=True,
                              bidirectional=bidirectional)
        elif self.rnn_type == 'rnn':
            self.rnn = nn.RNN(
                input_size=input_size, hidden_size=hidden_size, num_layers=num_layers,
                batch_first=True, bidirectional=bidirectional)
        else:
            raise ValueError("unknown rnn_type: {!r}".format(rnn_type))
        # BUGFIX: projection width must follow `bidirectional`; the original
        # hard-coded hidden_size * 2, which broke bidirectional=False.
        self.fc = nn.Linear(hidden_size * self.num_directions, out_dim)

    def _final_state(self, h_n):
        # h_n: (num_layers * num_directions, batch, hidden_size). For a
        # bidirectional net the last two slices are the forward/backward states
        # of the top layer; concatenate them. Otherwise take the top layer only.
        if self.num_directions == 2:
            return torch.cat([h_n[-1], h_n[-2]], dim=1)
        return h_n[-1]

    def forward(self, x):
        batch = x.size(0)
        state_shape = (self.num_layers * self.num_directions, batch, self.hidden_size)
        if self.rnn_type == 'lstm':
            h0 = torch.zeros(*state_shape).to(x.device)
            c0 = torch.zeros(*state_shape).to(x.device)
            out_pack, (h_n, c_n) = self.lstm(x, (h0, c0))
            # BUGFIX: the original concatenated h_n[-1] with c_n[-1] (hidden
            # state with the *cell* state); use the two directional hidden
            # states, matching the GRU/RNN branches.
            out = self.fc(self._final_state(h_n))
            return out_pack, out
        elif self.rnn_type == 'gru':
            h0 = torch.zeros(*state_shape).to(x.device)
            out_pack, h_n = self.gru(x, h0)
            out = self.fc(self._final_state(h_n))
            return out_pack, out
        elif self.rnn_type == 'rnn':
            out_pack, h_n = self.rnn(x, None)
            out = self.fc(self._final_state(h_n))
            return out_pack, out
        else:
            raise ValueError("unknown rnn_type: {!r}".format(self.rnn_type))


class ClassifierNet(nn.Module):
    """Small MLP classification head: input_dim -> hidden_dim -> hidden_dim//2 -> output_dim.

    BatchNorm + ReLU + Dropout follow the first linear layer; ReLU + Dropout
    follow the second. The trailing squeeze(1) collapses a singleton class
    dimension when output_dim == 1 and is a no-op otherwise.
    """

    def __init__(self, input_dim, hidden_dim, output_dim, dropout_p=0.5):
        super(ClassifierNet, self).__init__()
        self.dropout_p = dropout_p
        half_hidden = int(hidden_dim / 2)
        layers = [
            nn.Linear(in_features=input_dim, out_features=hidden_dim),
            nn.BatchNorm1d(num_features=hidden_dim),
            nn.ReLU(True),
            nn.Dropout(p=self.dropout_p),
            nn.Linear(in_features=hidden_dim, out_features=half_hidden),
            nn.ReLU(True),
            nn.Dropout(p=self.dropout_p),
            nn.Linear(in_features=half_hidden, out_features=output_dim),
        ]
        self.classifier = nn.Sequential(*layers)

    def forward(self, features, labels=None):
        logits = self.classifier(features)
        return logits.squeeze(1)

class TNet(nn.Module):
    """Embeds assembly token encodings, runs them through a BiRNN sequence
    encoder, and classifies the sequence-level representation into 2 classes.
    """

    def __init__(self, rnn_type='gru'):
        super(TNet, self).__init__()

        # Vocabulary sizes for the two token streams (opcodes / full asm tokens).
        self.op_size = 63
        self.asm_size = 256

        # BiRNN dimensions.
        self.birnn_in_dim = 100
        self.birnn_hidden_size = 256
        self.birnn_out_dim = 256

        # Fixed sequence length; derived flattened dims kept for reference.
        self.seq_len = 1500
        self.in_dim_op = self.seq_len * self.op_size
        self.in_dim_asm = self.seq_len * self.asm_size
        half_embed = int(self.birnn_in_dim / 2)
        self.out_dim_op = self.seq_len * half_embed
        self.out_dim_asm = self.seq_len * half_embed

        self.tnet_in_dim = self.birnn_out_dim
        self.D_in = 256

        # Learnable embedding matrix projecting asm encodings into the
        # BiRNN input space.
        self.w_asm = Parameter(torch.Tensor(self.asm_size, int(self.birnn_in_dim)))

        self.birnn = BiRNN(self.birnn_in_dim, self.birnn_hidden_size, self.birnn_out_dim,
                           rnn_type=rnn_type, num_layers=2)

        self.classifier = ClassifierNet(self.D_in, int(self.D_in / 2), 2)

        self.tnet_hidden_dim = 500

        self.reset_parameters()

    def reset_parameters(self) -> None:
        """Initialise the embedding weights from a small normal distribution."""
        nn.init.normal_(self.w_asm, std=0.05)

    def forward(self, x_asm, labels=None):
        # Project token encodings into the embedding space, then reshape into
        # (batch, seq_len, embed_dim) sequences for the recurrent encoder.
        embedded = torch.tensordot(x_asm, self.w_asm, dims=1)
        sequences = embedded.view(-1, self.seq_len, embedded.shape[-1])
        _, sequence_repr = self.birnn(sequences)
        return self.classifier(sequence_repr)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='RNN')
    # BUGFIX: the --batch-size help text claimed "default: 128" while the
    # actual default is 16; the help strings now state the real defaults.
    parser.add_argument('--batch-size', type=int, default=16, metavar='B',
                        help='input batch size for training (default: 16)')
    parser.add_argument('--epochs', type=int, default=100, metavar='N',
                        help='number of epochs to train (default: 100)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--rnn-type', type=str, default='gru', metavar='R',
                        help='rnn type (default: gru)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    args = parser.parse_args()
    args.cuda = not args.no_cuda and torch.cuda.is_available()

    torch.manual_seed(args.seed)

    device = torch.device("cuda" if args.cuda else "cpu")

    # Worker/pinning options prepared for DataLoader construction; note they
    # are not currently passed to the DataLoader calls below.
    kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}

    def train(epoch, data_loader):
        """Run one training epoch over ``data_loader``.

        Side effects: updates ``model`` parameters via ``optimizer``, refits
        ``model_svm`` on this epoch's logits, and prints progress every
        ``args.log_interval`` batches.

        Returns the metrics dict from report() with an added 'loss' entry
        (mean batch loss over the epoch).
        """
        model.train()
        train_loss = 0
        total_correct = 0
        X = []  # network logits collected for the SVM fit
        y = []  # corresponding labels for the SVM fit
        all_preds = []
        all_labels = []

        for i, batch in enumerate(data_loader):
            data, labels = batch, batch.y
            data = data.to(device)
            data_asm = data.x.squeeze(dim=1)

            optimizer.zero_grad()
            c_r = model(data_asm, labels)

            X.extend(c_r.cpu().tolist())
            y.extend(labels.cpu().tolist())

            # BUGFIX: the original computed torch.max(c_r.data, 1) twice per
            # batch; compute the argmax once and reuse it for both the
            # prediction bookkeeping and the running accuracy count.
            _, predicted = torch.max(c_r.data, 1)
            all_preds += [predicted.cpu()]
            all_labels += [labels.cpu()]

            loss_c = criterion(c_r, labels.long().to(device))
            loss = loss_c

            loss.backward()
            train_loss += loss.item()
            optimizer.step()
            total_correct += (predicted.cpu() == labels.data).sum().item()
            if i % args.log_interval == 0:
                print('Train Epoch: {} [{}-{}]\tLoss: {:.6f}'.format(
                    epoch, i * len(batch), total_correct,
                    loss.item()))
        # Fit the auxiliary SVM on the network's logits for this epoch; it is
        # scored later in eval_net().
        model_svm.fit(X, y)

        metrics = report(torch.cat(all_labels, dim=0), torch.cat(all_preds, dim=0).cpu())
        print('====> Train: ', report_to_str(metrics, keys=True))
        print('====> Epoch: {} - {} - {}- Average loss: {:.4f}'.format(
            epoch, total_correct, len(train_loader.dataset),
            train_loss / len(train_loader)))
        metrics['loss'] = train_loss / len(train_loader)
        return metrics


    def eval_net(epoch, data_loader):
        """Evaluate ``model`` on ``data_loader``; checkpoint on improvement.

        Also plots a t-SNE embedding of the network outputs, scores the SVM
        fitted during train() on this split's logits, and updates the global
        best-loss / best-accuracy trackers. Returns the metrics dict from
        report() with an added 'loss' entry.
        """
        global best_valid_loss, best_valid_acc
        model.eval()
        test_loss = 0.0
        with torch.no_grad():
            total_correct = 0
            all_preds = []
            all_labels = []
            all_outputs = []
            X = []  # network logits collected for the SVM evaluation
            test_y = []
            for i, batch in enumerate(data_loader):
                data, classes = batch, batch.y
                data = data.to(device)
                data_asm = data.x.squeeze(dim=1)
                c_r = model(data_asm)
                X.extend(c_r.cpu().tolist())
                test_y.extend(classes.cpu().tolist())
                _, predicted = torch.max(c_r.data, 1)

                all_outputs += [c_r.detach().cpu()]
                all_preds += [predicted.cpu()]
                all_labels += [classes.cpu()]

                total_correct += (predicted.cpu() == classes.data).sum().item()

                # BUGFIX: accumulate a Python float (.item()) instead of a
                # tensor, so metrics['loss'] and the checkpoint filename get a
                # plain number rather than a device tensor.
                test_loss += criterion(c_r, classes.long().to(device)).item()

            plot_embedding(torch.cat(all_outputs).cpu().numpy(), torch.cat(all_labels).numpy())
            metrics = report(torch.cat(all_labels, dim=0), torch.cat(all_preds, dim=0).cpu())
            print('====> T/V: ', report_to_str(metrics, keys=True))
            # BUGFIX: normalise by the loader actually evaluated; the original
            # divided by len(test_loader) even when called with valid_loader.
            test_loss /= len(data_loader)
            valid_acc = metrics['acc']
            # Save a checkpoint whenever either tracked best improves.
            if test_loss < best_valid_loss or best_valid_acc < valid_acc:
                best_valid_loss = min(best_valid_loss, test_loss)
                best_valid_acc = max(best_valid_acc, valid_acc)
                m_path = '/home/wb/work/paper/paper_code/checkpoint_model/tnet-{}-{:.4f}-{:.4f}.pt'.format(epoch,
                                                                                                          valid_acc,
                                                                                                          test_loss)
                torch.save(model.state_dict(), m_path)
        metrics['loss'] = test_loss
        # Score the SVM (fitted in train()) on this split's logits.
        predictions = model_svm.predict(X)
        pdata = {
            'accuracy': accuracy_score(test_y, predictions) * 100,
            'precision': precision_score(test_y, predictions) * 100,
            'recall': recall_score(test_y, predictions) * 100,
            'f1': f1_score(test_y, predictions) * 100,
        }
        print('====> T/V set loss: {:.4f} - {:.4f} - {:.4f}'.format(test_loss, total_correct,
                                                                     len(torch.cat(all_labels).numpy())))
        print('====> SVM', pdata)

        return metrics


    # Best-so-far trackers consumed and updated by eval_net's checkpointing.
    best_valid_loss = float('inf')
    best_valid_acc = float(0)
    model = TNet(rnn_type='gru').to(device)
    # Auxiliary SVM classifier trained on the network's logits each epoch.
    model_svm = SVM()
    optimizer = optim.Adam(model.parameters(), lr=1e-3)
    criterion = nn.CrossEntropyLoss()
    # Resume from a hard-coded checkpoint path if present (machine-specific).
    saved_path = '/home/wb/work/paper/paper_code/checkpoint_model/tnet-39.pt'
    if os.path.exists(saved_path):
        print("Loaded checkpoint model {}".format(saved_path))
        model.load_state_dict(torch.load(saved_path))

    summary = TrainingSummaryWriter("runs/tv-devign")
    # Load the Devign dataset; presumably a sequence of torch_geometric Data
    # objects (each batch exposes .x and .y below) — confirm in load_bin2graph.
    input_dataset = load_bin_devign()#max_len=320
    # Fixed shuffle seed so the train/valid/test split is reproducible.
    np.random.seed(12345)
    np.random.shuffle(input_dataset)

    # Split: the last 4/5 of the shuffled data is training; the first fifth is
    # halved into validation and test (roughly 80/10/10).
    train_dataset = input_dataset[len(input_dataset) // 5:]
    test_dataset = input_dataset[:len(input_dataset) // 5]
    valid_dataset = test_dataset[:len(test_dataset) // 2]
    test_dataset = test_dataset[len(test_dataset) // 2:]

    train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True,drop_last=True)
    valid_loader = DataLoader(valid_dataset, batch_size=args.batch_size,drop_last=True)
    test_loader = DataLoader(test_dataset, batch_size=args.batch_size,drop_last=True)

    print("data_len t={} t={} v={}".format(len(train_loader.dataset),len(test_loader.dataset),len(valid_loader.dataset)))

    # One-off FLOPs / parameter count on a single batch via thop, then break.
    from thop import profile,clever_format

    for i, batch in enumerate(train_loader):
        data, labels = batch, batch.y
        data = data.to(device)
        data_asm = data.x.squeeze(dim=1)
        flops, params = profile(model, inputs=(data_asm, labels,))
        # NOTE(review): profile() returns (flops, params); the formatted first
        # value is named `macs` here — the naming may be misleading.
        macs, params = clever_format([flops, params], "%.3f")
        print(macs)
        print(params)
        break

    # Main loop: one training epoch, then evaluation on the test split, per
    # epoch; every metric except the raw loss is logged as a percentage.
    for epoch in range(1, args.epochs + 1):
        data_train = train(epoch,train_loader)
        data_eval = eval_net(epoch, test_loader)
        summary.step()
        for k, v in data_train.items():
            if k != "loss":
                summary.add_scalar('train_{}'.format(k), v * 100)
            else:
                summary.add_scalar('train_{}'.format(k), v)
        for k, v in data_eval.items():
            if k != "loss":
                summary.add_scalar('eval_{}'.format(k), v * 100)
            else:
                summary.add_scalar('eval_{}'.format(k), v)

    # Final pass over the held-out validation split (reuses the last `epoch`).
    print('====> Valid: ')
    eval_net(epoch, valid_loader)