from PySide2.QtWidgets import QApplication, QMessageBox,QFileDialog
from PySide2.QtGui import QPixmap
from PySide2.QtUiTools import QUiLoader
from PySide2.QtWidgets import QApplication,QMainWindow
from attack import Attck,get_module,imshow
import torchvision.transforms as T
import torch
import torchvision
import Util
from threading import Thread
from PIL import Image
import matplotlib.pyplot as plt
# CIFAR-10 label names in dataset index order.
classes = (
    'plane', 'car', 'bird', 'cat', 'deer',
    'dog', 'frog', 'horse', 'ship', 'truck',
)

# Prefer the first CUDA device, falling back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Shared matplotlib figures: `fig` holds two stacked axes for the batch view
# (clean row / adversarial row); `fig1`/`fig2` each hold a single axes used by
# the single-image before/after view.
fig, (ax1, ax2) = plt.subplots(2, 1)
fig1, ax3 = plt.subplots(1, 1)
fig2, ax4 = plt.subplots(1, 1)
class To(object):
    """Convert an integer image tensor (values 0-255) to float32 in [0, 1].

    Intended to follow ``T.PILToTensor()`` (which yields uint8) in a transform
    pipeline, performing the scaling step that ``T.ToTensor()`` would do.
    """

    def __call__(self, img, *args, **kwargs):
        # Out-of-place on purpose: the original did ``img /= 255.`` after
        # ``img.type(torch.float32)``, and ``Tensor.type`` returns the SAME
        # tensor when the dtype already matches — so a float32 input was
        # silently mutated in the caller.  ``to(...) / 255.`` always allocates
        # a fresh tensor.
        return img.to(torch.float32) / 255.

class Stats:
    """Main-window controller wiring the Qt UI to the adversarial-attack code.

    The UI is loaded from ``./UI/test.ui`` and connected as follows:
      - pushButton   -> handleCalc2  (batch attack over the CIFAR-10 test set)
      - toolButton   -> setBrowerPath (browse for an image file)
      - pushButton_2 -> handleCalc1  (attack a single user-chosen image)
    """

    def __init__(self):
        self.ui = QUiLoader().load('./UI/test.ui')
        self.ui.pushButton.clicked.connect(self.handleCalc2)
        self.ui.toolButton.clicked.connect(self.setBrowerPath)
        self.ui.pushButton_2.clicked.connect(self.handleCalc1)

    def setBrowerPath(self):
        """Open a file-picker dialog and put the chosen path into the line edit."""
        # getOpenFileName returns (path, selected_filter); only the path is used.
        download_path = QFileDialog.getOpenFileName(self.ui.toolButton,
                                                    "浏览",
                                                    "E:\\")
        self.ui.lineEdit.setText(download_path[0])

    def handleCalc1(self):
        """Read single-attack parameters from the UI and run :meth:`singleAttack`.

        ``eps`` (perturbation budget) is parsed as float, ``k`` (iteration
        count) as int; invalid input aborts with a console message.
        """
        source = self.ui.comboBox.currentText()
        eps = self.ui.textEdit.toPlainText()
        k = self.ui.textEdit_2.toPlainText()
        target = self.ui.comboBox_2.currentText()
        try:
            eps = float(eps)
            k = int(k)
        except ValueError:
            # Non-numeric eps/k: report and abort rather than crash the UI.
            print('error input')
            return
        imag_path = self.ui.lineEdit.text()
        label = self.ui.comboBox_4.currentText()
        # Map the human-readable class name back to its CIFAR-10 index.
        label = classes.index(label)
        self.singleAttack(source, target, imag_path, label, eps, k)

    def singleAttack(self, source, target, imag_path, label, eps, k):
        """Craft an adversarial example for one image and show it in the UI.

        Args:
            source: name of the white-box (surrogate) model used to craft.
            target: name of the black-box (victim) model being attacked.
            imag_path: path of the input image file.
            label: true CIFAR-10 class index of the image.
            eps: perturbation budget passed to the attack.
            k: number of attack iterations.
        """
        img_PIL = Image.open(imag_path).convert('RGB')
        transform = T.Compose([T.PILToTensor(), To(),
                               T.Resize(32),
                               T.Normalize(mean=(0.4914, 0.4822, 0.4465),
                                           std=(0.2471, 0.2435, 0.2616))])
        # Add a batch dimension: models expect (N, C, H, W).
        img = torch.unsqueeze(transform(img_PIL), 0)

        white = get_module(source)   # surrogate used to craft the example
        black = get_module(target)   # victim model under attack
        label = torch.tensor([label])
        # type='read' matches threadAttack; the original omitted it, which fell
        # back to Savetar's default mode and risked rewriting the library file.
        target_images = Util.Savetar(path_file='./library/library.json', type='read')
        attack = Attck(eps=eps, k=k, target_library=target_images)
        # Target class convention (same as threadAttack): next class, cyclic.
        adv = attack.generate(white, source, img, (label + 1) % 10)

        source_label = torch.argmax(white(img), dim=1)
        adv_source_label = torch.argmax(white(adv), dim=1)
        adv_target_label = torch.argmax(black(adv), dim=1)
        print(source_label, adv_target_label, adv_source_label)

        # Render clean vs adversarial image to files, then load them as pixmaps.
        imshow(torchvision.utils.make_grid(img), ax3, source_label.cpu().numpy())
        imshow(torchvision.utils.make_grid(adv), ax4, adv_target_label.cpu().numpy())
        imgpath0 = './single_attack0.jpg'
        imgpath1 = './single_attack1.jpg'
        fig1.savefig(imgpath0)
        fig2.savefig(imgpath1)
        self.ui.label_8.setPixmap(QPixmap(imgpath0))
        self.ui.label_8.setScaledContents(True)
        self.ui.label_9.setPixmap(QPixmap(imgpath1))
        self.ui.label_9.setScaledContents(True)

    def threadAttack(self, source, target, eps, k):
        """Run a batched transfer attack over the CIFAR-10 test set.

        Metrics accumulated over the whole set:
          error_rate: fraction of adversarial examples the black-box model
                      misclassifies (label != black-box prediction).
          uTR:  untargeted transfer rate — misclassified by both models.
          tTR:  targeted transfer rate — black-box hits the target label,
                normalised by white-box targeted successes (dtTR).
          tSuc: rate at which the black-box model outputs the target label.

        NOTE(review): this method runs on a worker thread but touches Qt
        widgets (progress bar, labels, text edit) directly; Qt only guarantees
        widget access from the GUI thread — consider signals/slots.  Behaviour
        kept as-is.
        """
        transform = T.Compose([T.ToTensor(),
                               T.Normalize(mean=(0.4914, 0.4822, 0.4465),
                                           std=(0.2471, 0.2435, 0.2616))])
        # White-box model crafts the examples; black-box model is attacked.
        white = get_module(source).to(device)
        black = get_module(target).to(device)
        batch = 4
        testset = torchvision.datasets.CIFAR10(root='./data', train=False,
                                               download=True, transform=transform)
        testloader = torch.utils.data.DataLoader(testset, batch_size=batch,
                                                 shuffle=True, num_workers=0,
                                                 drop_last=True)
        target_images = Util.Savetar(path_file='./library/library.json', type='read')
        attack = Attck(eps=eps, k=k, target_library=target_images)

        uTR = 0
        error_rate = 0
        tTR = 0
        tSuc = 0
        dtTR = 0
        # NOTE(review): drop_last=True may skip a final partial batch, so the
        # denominators based on len(testset) are very slightly pessimistic.
        total = len(testset)
        for i, (imag, label) in enumerate(testloader):
            imag = imag.to(device)
            label = label.to(device)
            target_label = (label + 1) % 10   # attack target: next class, cyclic

            adv = attack.generate(white, source, imag, target_label)

            adv_source_label = torch.argmax(white(adv), dim=1)
            adv_target_label = torch.argmax(black(adv), dim=1)

            if i % 100 == 0:
                print(i)
                self.ui.progressBar.setValue(int(100 * batch * i / total))
            if i == 0:
                # Show the first batch (clean row vs adversarial row) in the UI.
                imshow(torchvision.utils.make_grid(imag), ax1, label.cpu().numpy())
                imshow(torchvision.utils.make_grid(adv), ax2, adv_target_label.cpu().numpy())
                imgpath = f'./attack{i}.jpg'
                fig.savefig(imgpath)
                pixmap = QPixmap(imgpath)
                self.ui.label_6.setPixmap(pixmap)
                self.ui.label_6.setScaledContents(True)

            error_rate += torch.sum(label != adv_target_label).item()
            target_miss = label != adv_target_label
            source_miss = label != adv_source_label
            uTR += torch.sum(target_miss & source_miss).item()
            dtTR += torch.sum(adv_source_label == target_label).item()
            # tTR and tSuc share the same per-batch count; compute it once
            # (the original evaluated the identical reduction twice).
            hits = torch.sum(adv_target_label == target_label).item()
            tTR += hits
            tSuc += hits

        # Original set the bar to 100 inside the loop on every batch, defeating
        # the incremental update above; set it once after the loop finishes.
        self.ui.progressBar.setValue(100)
        # Guard the normalisation: the white-box may never hit the target.
        tTR = tTR / dtTR if dtTR else 0.0
        error_rate /= total
        tSuc /= total
        uTR /= total
        report = (f'error_rate = {error_rate}\n'
                  f'tTR = {tTR}\n'
                  f'tSuc = {tSuc}\n'
                  f'uTR = {uTR}')
        print(report)
        self.ui.textEdit_3.append(report)

    def handleCalc2(self):
        """Read batch-attack parameters from the UI and launch the attack thread."""
        source = self.ui.comboBox.currentText()
        eps = self.ui.textEdit.toPlainText()
        k = self.ui.textEdit_2.toPlainText()
        target = self.ui.comboBox_2.currentText()
        self.ui.progressBar.setValue(0)
        try:
            eps = float(eps)
            k = int(k)
        except ValueError:
            print('error input')
            return
        # Pass eps as float — the original truncated it with int(eps), silently
        # discarding fractional perturbation budgets (handleCalc1 uses float).
        t1 = Thread(target=self.threadAttack, args=(source, target, eps, k))
        t1.daemon = True   # setDaemon() is deprecated since Python 3.10
        t1.start()


if __name__ == '__main__':
    # Guarded entry point: launching the GUI at import time made the module
    # unusable as a library and untestable.  Reuse an existing QApplication
    # (e.g. when run inside a console that already created one) instead of
    # constructing a second instance, which Qt forbids.
    app = QApplication.instance()
    if app is None:
        app = QApplication([])
    stats = Stats()
    stats.ui.show()
    app.exec_()

