"""
By Jiyuan Liu (liujiyuan13@163.com), Jun. 28, 2021.
All rights reserved.
"""

import torch
from torch import nn
import torch.nn.init as init
from util import *

# Tag embedded in saved model / result file names to identify this experiment.
METHOD = 'mv_ablation_study'
# Index of the single view kept for this ablation run (see X = [X[ind_view]] below).
ind_view = 0


class FcNet(nn.Module):
    """Plain fully-connected network.

    Builds a Linear layer for each consecutive pair in `dims`, with an
    in-place ReLU after every layer except the last one, so the final
    output is raw scores (logits) suitable for CrossEntropyLoss.
    """

    def __init__(self, dims):
        """
        :param dims: layer widths, e.g. [in_dim, hidden, ..., out_dim].
        """
        super(FcNet, self).__init__()

        self.dims = dims
        self.net = nn.Sequential()
        # Single loop for all layers; previously the last layer was built by
        # duplicated code outside the loop, and the loop variable `id`
        # shadowed the builtin of the same name.
        num_layers = len(self.dims) - 1
        for layer_id in range(num_layers):
            self.net.add_module('fc_{}'.format(layer_id),
                                nn.Linear(self.dims[layer_id], self.dims[layer_id + 1]))
            # no activation after the final layer: logits go straight to the loss
            if layer_id < num_layers - 1:
                self.net.add_module('relu_{}'.format(layer_id), nn.ReLU(True))

    def forward(self, x):
        """Apply the layer stack to a batch `x` of shape [batch, dims[0]]."""
        return self.net(x)


class Model(nn.Module):
    """Multi-view classifier (ablation-study variant).

    Each view gets its own FcNet encoder; the per-view embeddings are fused
    through learned mixing matrices W_i into a common embedding, concatenated
    with the per-view embeddings, and classified by a final FcNet.

    Note: dynamic attributes (enc_net_i, W_i) were previously created and
    read via exec() on format strings. They are now handled with
    setattr()/getattr(), which registers submodules and parameters under the
    exact same names, so state_dicts saved by the old code still load.
    """

    def __init__(self, enc_shapes, cla_shape, learning_rate, device):
        """
        :param enc_shapes: per-view encoder layer widths; every encoder must
            end with the same embedding width.
        :param cla_shape: classifier layer widths; cla_shape[0] must equal
            embedding_width * (num_view + 1) (all views + fused embedding).
        :param learning_rate: Adam learning rate.
        :param device: torch device to place all parameters on.
        """
        super(Model, self).__init__()

        self.device = device
        self.lr = learning_rate
        self.num_view = len(enc_shapes)
        self.h_len = enc_shapes[0][-1]

        # classifier consumes num_view embeddings plus one fused embedding
        assert enc_shapes[0][-1] * (self.num_view + 1) == cla_shape[0]

        # encoder nets, registered as enc_net_0, enc_net_1, ...
        for i in range(self.num_view):
            setattr(self, 'enc_net_{}'.format(i), FcNet(enc_shapes[i]).to(self.device))

        # classification net
        self.cla_net = FcNet(cla_shape).to(self.device)

        # multiplexer (fusion) weights W_0, W_1, ...; nn.Module.__setattr__
        # registers nn.Parameter attributes automatically
        for i in range(self.num_view):
            setattr(self, 'W_{}'.format(i),
                    nn.Parameter(torch.rand([self.h_len, self.h_len], device=self.device),
                                 requires_grad=True))

        # optimizer: all net weights are trainable
        for _, param in self.named_parameters():
            param.requires_grad = True
        self.optim = torch.optim.Adam(
            filter(lambda p: p.requires_grad, self.parameters()), lr=self.lr)

        # init weights
        self._initialize_weights()

    def forward(self, Xs):
        """Encode each view, fuse, concatenate and classify.

        :param Xs: list of per-view input tensors (one per view).
        :return: class logits of shape [batch, num_class].
        """
        assert len(Xs) == self.num_view

        # per-view embeddings
        hs = [getattr(self, 'enc_net_{}'.format(i))(Xs[i]) for i in range(self.num_view)]

        # fuse embeddings through the per-view mixing matrices
        h_c = torch.matmul(hs[0], self.W_0)
        for i in range(1, self.num_view):
            h_c = h_c + torch.matmul(hs[i], getattr(self, 'W_{}'.format(i)))

        # horizontal concatenation: all view embeddings followed by the fused one
        h = torch.cat(hs + [h_c], dim=1)

        # classification net
        return self.cla_net(h)

    def _initialize_weights(self):
        """Xavier-initialize every Linear layer and every fusion matrix W_i."""
        for m in self.modules():
            if isinstance(m, nn.Linear):
                init.xavier_uniform_(m.weight, gain=1)
        for i in range(self.num_view):
            init.xavier_uniform_(getattr(self, 'W_{}'.format(i)), gain=1)

    def loss_fun(self):
        """Return the (mean-reduced) classification criterion."""
        return nn.CrossEntropyLoss()

    def get_batch(self, ind, batch_size):
        """Split indices `ind` into shuffled batches of size `batch_size`.

        The remainder (len(ind) % batch_size) is padded with extra indices
        drawn from a second shuffle, so every batch has exactly
        `batch_size` samples (some samples may appear twice per epoch).

        :param ind: 1-D numpy array of sample indices.
        :param batch_size: number of samples per batch.
        :return: list of numpy index arrays.
        """
        ind_r = ind.copy()
        np.random.shuffle(ind_r)
        batches, num = [], len(ind_r) // batch_size
        for i in range(num):
            batches.append(ind_r[i * batch_size:(i + 1) * batch_size])
        # pad the leftover tail up to a full batch
        flag = len(ind_r) % batch_size
        if flag != 0:
            more = batch_size - flag
            ind_rr = ind_r.copy()
            np.random.shuffle(ind_rr)
            tmp = np.hstack((ind_r[num * batch_size:], ind_rr[0:more]))
            batches.append(tmp)
        return batches

    def train_net(self, Xs, gt, batch_size, epoch, Xs_te, gt_te):
        """Run one training epoch, then evaluate on the test split.

        :param Xs: list of per-view feature matrices (numpy) for training.
        :param gt: training labels (numpy, int).
        :param batch_size: mini-batch size.
        :param epoch: epoch number, used only for logging.
        :param Xs_te, gt_te: test features/labels evaluated after the epoch.
        :return: (test loss, fpr, fnr, error_rate) for this epoch.
        """
        # get batches
        ind = np.arange(gt.shape[0])
        batches = self.get_batch(ind, batch_size)

        # prepare data
        Xs = [torch.tensor(x, dtype=torch.float32, device=self.device) for x in Xs]
        Y = torch.tensor(gt, dtype=torch.int64, device=self.device)

        # batch training; `loss` accumulates the per-batch mean losses
        loss, y = 0, np.zeros(len(ind))
        for batch in batches:
            Xs_tmp = [x[batch, :] for x in Xs]
            y_batch = self.forward(Xs_tmp)
            loss_batch = self.loss_fun()(y_batch, Y[batch])
            # backward
            self.optim.zero_grad()
            loss_batch.backward()
            self.optim.step()
            # accumulate loss and record hard predictions for this batch
            loss += loss_batch.item()
            y[batch] = np.argmax(y_batch.detach().cpu().numpy(), axis=1)

        # evaluate on the test split
        loss_te, y_te = self.test_net(Xs_te, gt_te)
        fpr, fnr, error_rate = metric(gt_te, y_te, labels=[1, 0], percent=True)
        print('-- epoch: {}, loss: {:.3f}, fpr: {:.3f}, fnr: {:.3f}, error_rate: {:.3f}'.format(epoch, loss_te, fpr, fnr, error_rate))

        return loss_te, fpr, fnr, error_rate

    def test_net(self, Xs, gt):
        """Evaluate on (Xs, gt) without gradient updates.

        :param Xs: list of per-view feature matrices (numpy).
        :param gt: labels (numpy, int).
        :return: (scaled loss, predicted labels as numpy array).
        """
        # prepare data
        Xs = [torch.tensor(x, dtype=torch.float32, device=self.device) for x in Xs]
        Y = torch.tensor(gt, dtype=torch.int64, device=self.device)

        y = self.forward(Xs)
        loss = self.loss_fun()(y, Y).item()

        y = np.argmax(y.detach().cpu().numpy(), axis=1)

        # NOTE(review): CrossEntropyLoss already averages over samples, so
        # dividing by Y.shape[0] again scales the reported loss by 1/N. Kept
        # as-is for backward-comparable logs/plots — confirm intended scale.
        return loss / Y.shape[0], y

    def save(self, path):
        """Serialize the model's state_dict to `path`."""
        save_dict = self.state_dict()
        torch.save(save_dict, path)

        return

    def load(self, path):
        """Load a state_dict from `path`, keeping any keys it does not cover."""
        state_dict, tmp = torch.load(path), self.state_dict()
        tmp.update(state_dict)
        self.load_state_dict(tmp)

        return




if __name__ == "__main__":

    # device config
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # dir config — alternative dataset names kept for reference
    # data_name = 'Android_Mischief_V2'                    # no use
    # data_name = 'CTU_Encrypted_Malware_Traffic'          # no use
    # data_name = 'CTU_Encrypted_Malware_Traffic_2_class'  # no use
    # data_name = 'CTU_Normal_vs_Adw'
    # data_name = 'CTU_Normal_vs_Drp'
    # data_name = 'CTU_Normal_vs_Rtk'
    # data_name = 'CTU_Normal_vs_Susp'
    # data_name = 'CTU_Normal_vs_Trj'                      # no use
    data_name = 'CTU_Normal_vs_Adw_Drp_Rtk_Susp'
    print('# ', data_name)
    # data_dir = 'D:/Work/datasets/mData/maldata/amd/'     # no use
    data_dir = 'data/'
    model_dir = 'model/'
    split_dir = 'split/'
    res_dir = 'res/'

    # load data; keep only the single view selected by ind_view (ablation)
    print('- load data ...')
    X, Y = load_data(data_dir + data_name + '.mat')
    X = [X[ind_view]]
    fea_dims = [x.shape[1] for x in X]
    num_view, num_sample, num_class = len(X), Y.shape[0], np.unique(Y).shape[0]

    # train-test split: reuse a saved split for reproducibility, else create one
    print('- split data ...')
    load_split = True
    if load_split:
        ind = np.load(split_dir + data_name + '_split.npz')
        ind_tr, ind_te = ind['ind_tr'], ind['ind_te']
    else:
        ind_tr, ind_te = gen_split(Y, tr_ratio=0.8)
        np.savez(split_dir + data_name + '_split.npz', ind_tr=ind_tr, ind_te=ind_te)
    Xs_tr, gt_tr = [x[ind_tr, :] for x in X], Y[ind_tr]
    Xs_te, gt_te = [x[ind_te, :] for x in X], Y[ind_te]

    # network config
    enc_shapes = [[fea_dims[i], 16, num_class] for i in range(num_view)]
    cla_shape = [num_class * (num_view + 1), 16, num_class]
    lr = 1e-3
    model = Model(enc_shapes, cla_shape, lr, device)

    # train or test
    train = True
    # history lists are initialized BEFORE the branch: the result dump at the
    # bottom references them, and previously raised NameError when train=False
    losses, fprs, fnrs, error_rates = [], [], [], []
    if train:
        # train
        print('- train ...')
        batch_size = 512
        epochs = 100
        for epoch in range(epochs):
            loss, fpr, fnr, error_rate = model.train_net(Xs_tr, gt_tr, batch_size, epoch, Xs_te, gt_te)
            losses.append(loss)
            fprs.append(fpr)
            fnrs.append(fnr)
            error_rates.append(error_rate)
        # plot test loss and error rate across epochs
        twin_plot(range(epochs), losses, error_rates)
        # save trained weights
        print('- save ...')
        model.save(model_dir + data_name + '_' + METHOD + '_view_' + str(ind_view) + '.w')

    # load
    print('- load ...')
    model.load(model_dir + data_name + '_' + METHOD + '_view_' + str(ind_view) + '.w')
    # test
    print('- test ...')
    loss_te, y_te = model.test_net(Xs_te, gt_te)

    # evaluate
    print('- eval ...')
    fpr, fnr, error_rate = metric(gt_te, y_te, labels=[1, 0], percent=True)
    print('# fpr: {:.3f}, fnr: {:.3f}, error_rate: {:.3f}'.format(fpr, fnr, error_rate))

    # save result (history arrays are empty when train=False)
    np.savez(res_dir + data_name + '_' + METHOD + '_view_' + str(ind_view) + '_res.npz',
             data_name=data_name, gt_te=gt_te, y_te=y_te,
             losses=losses, fnrs=fnrs, fprs=fprs, error_rates=error_rates)