from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm

"""### data & metadata

"""

# Hyper-parameters (defined first so the loaders below can use them)
batch_size = 10
epoch = 1
learning_rate = 0.001

# Load the CIFAR-10 dataset (the original comment said MNIST, but
# datasets.CIFAR10 is what is actually loaded).
# CIFAR-10 images are already 32x32, so the Resize is a no-op kept for
# safety against differently sized inputs.
_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Resize((32, 32)),
])
test_loader = torch.utils.data.DataLoader(
    datasets.CIFAR10('../test_data', train=False, download=True, transform=_transform),
    batch_size=batch_size, shuffle=True)
train_loader = torch.utils.data.DataLoader(
    datasets.CIFAR10('../test_data', train=True, download=True, transform=_transform),
    batch_size=batch_size, shuffle=True)


# LeNet Model definition
# LeNet-style CNN for 32x32x3 (CIFAR-10) inputs.
class Net(nn.Module):
  """Three conv/pool stages followed by a two-layer classifier head."""

  def __init__(self):
    super(Net, self).__init__()
    # Feature extractor: each conv keeps the spatial size (padding=1)
    # while each pooling step halves it: 32 -> 16 -> 8 -> 4.
    self.conv1 = nn.Conv2d(3, 16, 3, padding=1)
    self.conv2 = nn.Conv2d(16, 32, 3, padding=1)
    self.conv3 = nn.Conv2d(32, 64, 3, padding=1)
    self.pool = nn.MaxPool2d(2, 2)
    # Classifier head: 64*4*4 flattened features -> 500 -> 10 logits.
    self.fc1 = nn.Linear(64 * 4 * 4, 500)
    self.fc2 = nn.Linear(500, 10)
    # Dropout (p=0.3) applied before each fully connected layer.
    self.dropout = nn.Dropout(0.3)

  def forward(self, x):
    # Run the three conv -> relu -> pool stages in sequence.
    for conv in (self.conv1, self.conv2, self.conv3):
      x = self.pool(F.relu(conv(x)))
    # Flatten the feature maps to (batch, 64*4*4) for the linear head.
    x = x.contiguous().view(-1, 64 * 4 * 4)
    x = self.dropout(x)
    x = F.relu(self.fc1(x))
    x = self.dropout(x)
    # Final layer emits raw class logits (CrossEntropyLoss applies
    # log-softmax internally, so no activation here).
    return self.fc2(x)

# Select the device: CUDA GPU when available, otherwise CPU.
device = torch.device("cuda" if (torch.cuda.is_available()) else "cpu")

# Instantiate the network, move it to the chosen device, and define the
# optimizer (SGD with momentum) plus the classification loss.
simple_model = Net().to(device)
optimizer1 = torch.optim.SGD(simple_model.parameters(),lr = learning_rate,momentum=0.9)
# print (simple_model)
loss_func = nn.CrossEntropyLoss()

# 训练模型
# Train the model for `epoch` passes over train_loader.
def train(model, optimizer):
  """Run the training loop, updating `model` in place.

  Args:
    model: the network to train.
    optimizer: optimizer bound to `model`'s parameters.

  Uses the module-level `epoch`, `train_loader`, `device` and `loss_func`.
  """
  model.train()  # make dropout active during training
  for i in range(epoch):
    for j, (data, target) in tqdm(enumerate(train_loader)):
      data = data.to(device)
      target = target.to(device)
      logit = model(data)
      loss = loss_func(logit, target)
      # Idiomatic optimizer.zero_grad(); equivalent to the original
      # model.zero_grad() here since the optimizer holds all model params.
      optimizer.zero_grad()
      loss.backward()
      optimizer.step()
      if j % 1000 == 0:
        # .item() extracts the Python scalar so the log prints a number
        # rather than a tensor repr.
        print ('第{}个数据，loss值等于{}'.format(j, loss.item()))
train(simple_model, optimizer1)

# Switch to eval mode (disables dropout) before evaluation / attacks.
simple_model.eval()


"""### metaParsing"""

import foolbox as fb  
from foolbox.criteria import TargetedMisclassification

# Wrap the trained model with fb.PyTorchModel; the resulting fmodel is
# used much like simple_model itself.
fmodel = fb.PyTorchModel(simple_model,bounds=(0,1))
# fb.utils.samples can draw from cifar10, cifar100, imagenet, mnist, etc.;
# images are channel-first. Because fmodel was given bounds, the sampled
# images are automatically scaled into that bounds range.
images, labels = fb.utils.samples(fmodel, dataset='cifar10', batchsize=10)


"""### parsing classes to dict"""

import time

# Targeted attack: push every sample toward class 3.
def target():
  """Run a targeted Boundary Attack against fmodel and report timing.

  Uses the module-level `fmodel`, `images`, `device` and `batch_size`.
  """
  start = time.time()
  # Target class 3 for every sample in the batch.
  criterion = TargetedMisclassification(torch.tensor([3] * batch_size, device=device))
  # Define the attack; its parameters are set at construction time (see
  # the foolbox source on GitHub for the full list).
  attack = fb.attacks.BoundaryAttack()
  # Run the targeted attack; this calling convention is generic across
  # foolbox attacks.
  raw, clipped, is_adv = attack(fmodel, images.to(device), epsilons=0.5, criterion=criterion)
  # Labels the model assigns to the raw adversarial examples.
  adver_target = torch.max(fmodel(raw), 1)[1]
  end = time.time()
  # BUGFIX: the original message said "CW target" although the attack run
  # here is BoundaryAttack.
  print ('BoundaryAttack target running {} seconds'.format(end - start))
  # Report how many samples were successfully driven to the target class.
  print ('adversarial success: {}/{}'.format(int(is_adv.sum()), batch_size))


target()
# untarget()
