# encoding:utf-8
# -----------------------------------------------------------
# "ML-Stealer: Stealing Prediction Functionality of Machine Learning Models with Mere Black-Box Access"
# @author: Shijie Wang, 2019.
# ------------------------------------------------------------

"""
WGAN-GP Model
"""
import argparse
import os
import random

import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data

import pandas as pd
import numpy as np
from sklearn.externals import joblib
from torch.utils.data import Dataset, DataLoader


# custom weights initialization called on netG and netD
def weight_init(m):
    """Initialise one sub-module in place (use via ``model.apply(weight_init)``).

    Fully-connected layers get Xavier-uniform weights; batch-norm layers
    get N(1.0, 0.02) weights and zero bias. Other module types are left
    untouched.
    """
    name = type(m).__name__
    if 'Linear' in name:
        nn.init.xavier_uniform_(m.weight)
    elif 'BatchNorm' in name:
        nn.init.normal_(m.weight, 1.0, 0.02)
        nn.init.zeros_(m.bias)


class AdultDataset(Dataset):
    """Tabular dataset backed by a comma-separated file.

    Each item is one row of the CSV, returned as a float32 tensor with
    any singleton dimensions squeezed away.
    """

    def __init__(self, csv_file):
        # The whole table is held in memory as a DataFrame.
        self.adult_data = pd.read_csv(csv_file, sep=',')

    def __len__(self):
        return self.adult_data.shape[0]

    def __getitem__(self, idx):
        # Select the row as a (1, n_features) frame, convert to float32
        # and drop the singleton leading dimension.
        row = self.adult_data.iloc[[idx]].to_numpy()
        return torch.from_numpy(row).float().squeeze()


class Generator(nn.Module):
    """Substitute model: maps a 14-feature record to a 2-class
    probability distribution (softmax over the last layer).
    """

    def __init__(self, ngpu):
        super(Generator, self).__init__()
        self.ngpu = ngpu
        # Stack of FC layers; kept in this exact order so parameter
        # initialisation is reproducible under a fixed seed.
        layers = [
            nn.Linear(in_features=14, out_features=32, bias=True),
            nn.ReLU(True),
            nn.Linear(in_features=32, out_features=128, bias=True),
            nn.ReLU(True),
            nn.Linear(in_features=128, out_features=128, bias=True),
            nn.ReLU(True),
            nn.Linear(in_features=128, out_features=128, bias=True),
            nn.ReLU(True),
            nn.Linear(in_features=128, out_features=32, bias=True),
            nn.BatchNorm1d(32),
            nn.ReLU(True),
            nn.Linear(in_features=32, out_features=2, bias=True),
            nn.Softmax(dim=1),
        ]
        self.main = nn.Sequential(*layers)

    def forward(self, inputs):
        # Split the batch across GPUs only when the input already lives
        # on a CUDA device and more than one GPU was requested.
        if inputs.is_cuda and self.ngpu > 1:
            return nn.parallel.data_parallel(self.main, inputs, range(self.ngpu))
        return self.main(inputs)


class Discriminator(nn.Module):
    """WGAN critic: scores a 2-class probability distribution with a
    single unbounded real value (no sigmoid — Wasserstein loss).
    """

    def __init__(self, ngpu):
        super(Discriminator, self).__init__()
        self.ngpu = ngpu
        # Layer order matters for seed-reproducible initialisation.
        layers = [
            nn.Linear(in_features=2, out_features=128, bias=True),
            nn.ReLU(inplace=True),
            nn.Linear(in_features=128, out_features=128, bias=True),
            nn.BatchNorm1d(128),
            nn.ReLU(inplace=True),
            nn.Linear(in_features=128, out_features=1, bias=True),
        ]
        self.main = nn.Sequential(*layers)

    def forward(self, inputs):
        # Multi-GPU path only when the input tensor is on CUDA and more
        # than one GPU was requested.
        if inputs.is_cuda and self.ngpu > 1:
            outputs = nn.parallel.data_parallel(self.main, inputs, range(self.ngpu))
        else:
            outputs = self.main(inputs)
        # Flatten the trailing singleton dim: (N, 1) -> (N,).
        return outputs.view(-1, 1).squeeze(1)


def compute_gradient_penalty(net, real_samples, fake_samples, device, lambda_gp):
    """Calculate the gradient penalty loss for WGAN-GP.

    Interpolates randomly between real and fake samples, runs the critic
    on the interpolates, and penalises the deviation of the per-sample
    gradient norm from 1.

    Args:
        net: the critic; called as ``net(interpolates)``.
        real_samples: real batch, shape ``(N, ...)``.
        fake_samples: fake batch, broadcast-compatible with ``real_samples``.
        device: torch device for the random interpolation weights.
        lambda_gp: scalar weight applied to the penalty.

    Returns:
        Scalar tensor: ``lambda_gp * mean((||grad||_2 - 1)^2)``.
    """
    # Random per-sample weight for interpolation between real and fake.
    alpha = torch.rand(real_samples.size(0), 1, device=device)
    interpolates = (alpha * real_samples + ((1 - alpha) * fake_samples)).requires_grad_(True)
    d_interpolates = net(interpolates)
    # BUGFIX: torch.full((N,), 1) produces an integer (Long) tensor on
    # modern PyTorch, which autograd.grad rejects as grad_outputs; it also
    # hard-coded a 1-D shape. ones_like matches the critic output's dtype,
    # device AND shape, so this works for any critic head.
    grad_outputs = torch.ones_like(d_interpolates)
    # Gradient of the critic score w.r.t. the interpolated inputs.
    gradients = autograd.grad(
        outputs=d_interpolates,
        inputs=interpolates,
        grad_outputs=grad_outputs,
        create_graph=True,
        retain_graph=True,
        only_inputs=True,
    )[0]
    gradients = gradients.view(gradients.size(0), -1)
    gradients_penaltys = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * lambda_gp
    return gradients_penaltys


def main():
    """Train the WGAN-GP substitute (model-stealing) pipeline.

    Loads the Adult CSV dataset and a pre-trained victim classifier,
    then alternates critic (netD) and generator (netG) updates so that
    netG's output distribution matches the victim's predicted
    probabilities. Periodically evaluates MSE / error rate against the
    victim and checkpoints netG every 50 epochs.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataroot', required=True, help='path to dataset')
    parser.add_argument('--victimmodel', required=True, help='path to victim model')
    parser.add_argument('--workers', type=int, default=4, help='number of data loading workers')
    parser.add_argument('--batchSize', type=int, default=500, help='inputs batch size')
    parser.add_argument('--niter', type=int, default=100, help='number of epochs to train for')
    parser.add_argument('--n_critic', type=int, default=5, help='number of training steps for discriminator per iter')
    parser.add_argument('--lr', type=float, default=0.0001, help='learning rate')
    parser.add_argument('--cuda', action='store_true', help='enables cuda')
    parser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use')
    parser.add_argument('--netG', default='', help='path to netG')
    parser.add_argument('--netD', default='', help='path to netD')
    parser.add_argument('--outf', default='.', help='folder to output')
    parser.add_argument('--manualSeed', type=int, help='manual seed')
    parser.add_argument('--model', type=str, default='train', help='GAN train models.default: \'train\'. other: gen')

    args = parser.parse_args()
    print(args)

    # Best-effort output-dir creation; an existing dir is fine.
    try:
        os.makedirs(args.outf)
    except OSError:
        pass

    # Seed python and torch RNGs for reproducibility (numpy is NOT seeded).
    if args.manualSeed is None:
        args.manualSeed = random.randint(1, 10000)
    print("Random Seed: ", args.manualSeed)
    random.seed(args.manualSeed)
    torch.manual_seed(args.manualSeed)

    cudnn.benchmark = True

    if torch.cuda.is_available() and not args.cuda:
        print("WARNING: You have a CUDA device, so you should probably run with --cuda")

    adult_dataset = AdultDataset(csv_file=args.dataroot)
    adult_dataloader = DataLoader(adult_dataset, batch_size=args.batchSize,
                                  shuffle=True, num_workers=args.workers, pin_memory=True)

    device = torch.device("cuda" if args.cuda else "cpu")
    ngpu = int(args.ngpu)

    # Loss weight for gradient penalty
    lambda_gp = 10

    # NOTE(review): when --netG/--netD is given, torch.load replaces the
    # freshly built net, so the weight_init applied above is discarded.
    # The checkpoint is assumed to be a whole pickled module — verify.
    netG = Generator(ngpu).to(device)
    netG.apply(weight_init)
    if args.netG != '':
        netG = torch.load(args.netG)

    netD = Discriminator(ngpu).to(device)
    netD.apply(weight_init)
    if args.netD != '':
        netD = torch.load(args.netD)

    # Victim model; presumably an sklearn estimator exposing
    # predict_proba. NOTE(review): loaded via the deprecated
    # sklearn.externals.joblib import (removed in sklearn >= 0.23).
    model = joblib.load(args.victimmodel)

    # setup optimizer
    optimizerD = optim.Adam(netD.parameters(), lr=args.lr, betas=(0.5, 0.999))
    optimizerG = optim.Adam(netG.parameters(), lr=args.lr, betas=(0.5, 0.999))

    for epoch in range(args.niter):
        for i, data in enumerate(adult_dataloader):
            # configure input
            netG.train()
            netD.train()
            data = data.to(device)
            # NOTE(review): i < len(dataloader), so i % len == i; this
            # gate means "every batch except index 99". With fewer than
            # 100 batches per epoch, the evaluation branch never runs.
            if i % len(adult_dataloader) != 99:

                # -------------------
                # Train Discriminator
                # -------------------
                # NOTE(review): G's grads are cleared here (before the D
                # step) rather than right before errG.backward(); works
                # because nothing writes G grads in between — confirm.
                netD.zero_grad()
                optimizerG.zero_grad()

                # Generator a batch of images
                fake_prediction = netG(data)
                # Victim's probability output for the same batch, moved
                # back to the training device as the "real" sample.
                real_prediction = torch.stack(
                    [torch.from_numpy(model.predict_proba(data.cpu().numpy()))]).squeeze().float().to(device)
                real_prediction.requires_grad = True

                # Real
                real_validity = netD(real_prediction)
                # Fake (detached: only D is updated in this step)
                fake_validity = netD(fake_prediction.detach())
                # Gradient penalty
                gradient_penalty = compute_gradient_penalty(netD, real_prediction, fake_prediction.data, device,
                                                            lambda_gp)

                # Loss measures generator's ability to fool the discriminator
                Wasserstein_D = -torch.mean(real_validity) + torch.mean(fake_validity)
                errD = Wasserstein_D + gradient_penalty
                errD.backward()
                optimizerD.step()

                # Train the generator every n_critic iterations
                if i % args.n_critic == 0:
                    # -------------------
                    # Train Generator
                    # -------------------
                    # Generate a batch of images
                    fake_prediction = netG(data)
                    # Adversarial loss
                    errG = -torch.mean(netD(fake_prediction))
                    errG.backward()
                    optimizerG.step()

                # NOTE(review): on iterations where G is not trained,
                # errG printed here is stale (from the last G step);
                # safe only because i == 0 always trains G first.
                print(f'[{epoch + 1}/{args.niter}][{i}/{len(adult_dataloader)}] '
                      f'Loss_D: {errD.item():.4f} '
                      f'Loss_G: {errG.item():.4f} '
                      f'Wasserstein_D: {Wasserstein_D.item():.4f}')
            if i % len(adult_dataloader) == 99:
                # Evaluation on batch 99: compare netG's output with the
                # victim's predict_proba on the same records.
                predict = netG(data).detach().cpu().numpy()
                real = model.predict_proba(data.cpu().numpy())
                error_num = 0
                # Squared error on the victim's top-class probability only.
                MSELoss = ((np.max(real, axis=1) - predict[np.arange(len(data)), np.argmax(real, axis=1)]) ** 2).sum()
                # NOTE(review): this inner loop shadows the enumerate
                # index `i`; harmless only because enumerate rebinds it
                # on the next batch — worth renaming.
                for i in range(len(data)):
                    if predict[i, np.argmax(real, axis=1)[i]] < 0.5:
                        error_num += 1
                ErrorRate = error_num / len(data)
                # print('fake_prdict:{}, real_predict:{}'.format(predict, real))
                print('MSE_Loss:{}, Error_Rate:{}'.format(MSELoss, ErrorRate))

        # Checkpoint the generator every 50 epochs (whole-module pickle).
        if epoch % 50 == 49:
            torch.save(netG, f'{args.outf}/netG_no_svm_{epoch + 1}.pth')


if __name__ == '__main__':
    main()
