# encoding: utf-8
# @Time:    :2024/12/20 21:59

import datetime
import time

import torch
from tqdm import tqdm
from torch.utils import data
from torch import functional as F
from models import model, load_datasets, batch_size, device
from models import MyDataset

# Adversarial training methods
from adversials import FGSM, FGM, PGD, FreeAT

# Shared loss and optimizer used by every training routine in this file.
# NOTE(review): `model` comes from the project-level `models` import above.
criterion = torch.nn.CrossEntropyLoss()
# Adam with a small L2 penalty; lr/weight_decay are hard-coded here.
optimizer = torch.optim.Adam(model.parameters(), lr=0.0001, weight_decay=1e-4)


def train(model, train_datasets, test_datasets, device, optimizer, criterion, batch_size: int = 64, epoch: int = 10):
    """Baseline (non-adversarial) training loop.

    Trains `model` on `train_datasets` for `epoch` epochs and evaluates on
    `test_datasets` after each one, printing loss/accuracy summaries.

    Fixes vs. original:
      * the train DataLoader is shuffled (required for well-behaved SGD);
      * `range(epoch)` instead of `range(epoch + 1)` (ran epoch+1 epochs);
      * scalars are extracted with `.item()` instead of the deprecated
        `.cpu().data.numpy()` pattern.
    """
    train_loader = data.DataLoader(train_datasets, batch_size=batch_size, shuffle=True)
    test_loader = data.DataLoader(test_datasets, batch_size=batch_size)
    for one_epoch in range(epoch):
        start = time.time()
        loss_sum = 0.0
        accu = 0.0
        model.train()
        for token_ids, label in tqdm(train_loader):
            token_ids = token_ids.to(device).long()
            label = label.to(device).long()

            outputs = model(token_ids)
            loss = criterion(outputs, label)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            loss_sum += loss.item()
            accu += (outputs.argmax(1) == label).sum().item()

        test_loss_sum = 0.0
        test_accu = 0.0
        model.eval()
        for token_ids, label in tqdm(test_loader):
            token_ids = token_ids.to(device).long()
            label = label.to(device).long()
            with torch.no_grad():
                out = model(token_ids)
                loss = criterion(out, label)
                test_loss_sum += loss.item()
                test_accu += (out.argmax(1) == label).sum().item()
        print("epoch %d, train loss:%f, train acc:%f, test loss:%f, test acc:%f, use time:" % (
            one_epoch, loss_sum / len(train_datasets), accu / len(train_datasets), test_loss_sum / len(test_datasets),
            test_accu / len(test_datasets)), int(time.time() - start))


def do_train():
    """Entry point: run the plain (non-adversarial) training loop."""
    datasets_pair = load_datasets()
    train(model, datasets_pair[0], datasets_pair[1], device, optimizer, criterion, batch_size)


def train_fgsm(model, train_datasets, test_datasets, device, optimizer, criterion, batch_size: int = 64, epoch: int = 10):
    """Adversarial training with FGSM.

    Per batch: compute clean loss/gradients, add an FGSM perturbation to the
    embedding, accumulate the adversarial gradients on top of the clean ones,
    restore the embedding, then take one optimizer step.

    Fixes vs. original:
      * the train DataLoader is shuffled;
      * `range(epoch)` instead of `range(epoch + 1)` (off-by-one);
      * train accuracy is measured on the clean forward pass — the original
        overwrote `outputs` with the adversarial pass before measuring;
      * scalars via `.item()` instead of deprecated `.cpu().data.numpy()`.
    """
    fgsm = FGSM(model=model)
    train_loader = data.DataLoader(train_datasets, batch_size=batch_size, shuffle=True)
    test_loader = data.DataLoader(test_datasets, batch_size=batch_size)
    for one_epoch in range(epoch):
        start = time.time()
        loss_sum = 0.0
        accu = 0.0
        model.train()
        for token_ids, label in tqdm(train_loader):
            token_ids = token_ids.to(device).long()
            label = label.to(device).long()

            outputs = model(token_ids)
            loss = criterion(outputs, label)
            optimizer.zero_grad()
            loss.backward()
            fgsm.attack()  # add the adversarial perturbation on the embedding
            adv_outputs = model(token_ids)
            loss_adv = criterion(adv_outputs, label)
            loss_adv.backward()  # accumulate adversarial grads on top of the clean ones
            fgsm.restore()  # restore the embedding parameters
            optimizer.step()

            loss_sum += loss.item()
            accu += (outputs.argmax(1) == label).sum().item()  # clean-pass accuracy

        test_loss_sum = 0.0
        test_accu = 0.0
        model.eval()
        for token_ids, label in tqdm(test_loader):
            token_ids = token_ids.to(device).long()
            label = label.to(device).long()
            with torch.no_grad():
                out = model(token_ids)
                loss = criterion(out, label)
                test_loss_sum += loss.item()
                test_accu += (out.argmax(1) == label).sum().item()
        print("epoch %d, train loss:%f, train acc:%f, test loss:%f, test acc:%f, use time:" % (
            one_epoch, loss_sum / len(train_datasets), accu / len(train_datasets), test_loss_sum / len(test_datasets),
            test_accu / len(test_datasets)), int(time.time() - start))


def do_train_fgsm():
    """Entry point: run FGSM adversarial training."""
    datasets_pair = load_datasets()
    train_fgsm(model, datasets_pair[0], datasets_pair[1], device, optimizer, criterion, batch_size)


def train_fgm(model, train_datasets, test_datasets, device, optimizer, criterion, batch_size: int = 64, epoch: int = 10):
    """Adversarial training with FGM.

    Per batch: clean backward pass, FGM perturbation on the embedding,
    adversarial backward pass accumulated on the clean gradients, restore,
    optimizer step, then zero the gradients for the next batch.

    Fixes vs. original:
      * the train DataLoader is shuffled;
      * `range(epoch)` instead of `range(epoch + 1)` (off-by-one);
      * train accuracy is measured on the clean forward pass — the original
        overwrote `outputs` with the adversarial pass before measuring;
      * scalars via `.item()` instead of deprecated `.cpu().data.numpy()`.
    """
    fgm = FGM(model=model)
    train_loader = data.DataLoader(train_datasets, batch_size=batch_size, shuffle=True)
    test_loader = data.DataLoader(test_datasets, batch_size=batch_size)
    for one_epoch in range(epoch):
        start = time.time()
        loss_sum = 0.0
        accu = 0.0
        model.train()
        for token_ids, label in tqdm(train_loader):
            token_ids = token_ids.to(device).long()
            label = label.to(device).long()

            outputs = model(token_ids)
            loss = criterion(outputs, label)
            loss.backward()

            # attack: perturb the embedding and accumulate adversarial grads
            fgm.attack()
            adv_outputs = model(token_ids)
            loss_adv = criterion(adv_outputs, label)
            loss_adv.backward()
            fgm.restore()

            optimizer.step()
            optimizer.zero_grad()  # grads cleared here, so no zero_grad at loop top

            loss_sum += loss.item()
            accu += (outputs.argmax(1) == label).sum().item()  # clean-pass accuracy

        test_loss_sum = 0.0
        test_accu = 0.0
        model.eval()
        for token_ids, label in tqdm(test_loader):
            token_ids = token_ids.to(device).long()
            label = label.to(device).long()
            with torch.no_grad():
                out = model(token_ids)
                loss = criterion(out, label)
                test_loss_sum += loss.item()
                test_accu += (out.argmax(1) == label).sum().item()
        print("epoch %d, train loss:%f, train acc:%f, test loss:%f, test acc:%f, use time:" % (
            one_epoch, loss_sum / len(train_datasets), accu / len(train_datasets), test_loss_sum / len(test_datasets),
            test_accu / len(test_datasets)), int(time.time() - start))


def do_train_fgm():
    """Entry point: run FGM adversarial training."""
    datasets_pair = load_datasets()
    train_fgm(model, datasets_pair[0], datasets_pair[1], device, optimizer, criterion, batch_size)


def train_pgd(model, train_datasets, test_datasets, device, optimizer, criterion, batch_size: int = 64, epoch: int = 10):
    """Adversarial training with PGD (K-step projected gradient ascent).

    Per batch: clean backward pass, then `pgd_k` attack steps on the
    embedding. Intermediate steps zero the model grads (only the perturbation
    direction is needed); the last step restores the backed-up clean grads and
    accumulates the final adversarial grads on them.

    Fixes vs. original:
      * **`optimizer.step()` now runs before `optimizer.zero_grad()`** — the
        original zeroed the gradients first, so the step saw all-zero grads
        and the model never learned from them;
      * the train DataLoader is shuffled;
      * `range(epoch)` instead of `range(epoch + 1)` (off-by-one);
      * train accuracy is measured on the clean forward pass;
      * scalars via `.item()` instead of deprecated `.cpu().data.numpy()`.
    """
    pgd = PGD(model)
    train_loader = data.DataLoader(train_datasets, batch_size=batch_size, shuffle=True)
    test_loader = data.DataLoader(test_datasets, batch_size=batch_size)
    for one_epoch in range(epoch):
        start = time.time()
        loss_sum = 0.0
        accu = 0.0
        model.train()
        for token_ids, label in tqdm(train_loader):
            token_ids = token_ids.to(device).long()
            label = label.to(device).long()

            outputs = model(token_ids)
            loss = criterion(outputs, label)
            loss.backward()

            # attack
            pgd_k = 3  # number of PGD ascent steps per batch
            pgd.backup_grad()
            for step_i in range(pgd_k):
                pgd.attack(is_first_attack=(step_i == 0))
                if step_i != pgd_k - 1:
                    model.zero_grad()  # intermediate: only the perturbation grad matters
                else:
                    pgd.restore_grad()  # last step: accumulate onto the clean grads
                adv_outputs = model(token_ids)
                loss_adv = criterion(adv_outputs, label)
                loss_adv.backward()
            pgd.restore()  # remove the perturbation from the embedding

            optimizer.step()       # step on the accumulated clean+adversarial grads
            optimizer.zero_grad()  # then clear for the next batch

            loss_sum += loss.item()
            accu += (outputs.argmax(1) == label).sum().item()  # clean-pass accuracy

        test_loss_sum = 0.0
        test_accu = 0.0
        model.eval()
        for token_ids, label in tqdm(test_loader):
            token_ids = token_ids.to(device).long()
            label = label.to(device).long()
            with torch.no_grad():
                out = model(token_ids)
                loss = criterion(out, label)
                test_loss_sum += loss.item()
                test_accu += (out.argmax(1) == label).sum().item()
        print("epoch %d, train loss:%f, train acc:%f, test loss:%f, test acc:%f, use time:" % (
            one_epoch, loss_sum / len(train_datasets), accu / len(train_datasets), test_loss_sum / len(test_datasets),
            test_accu / len(test_datasets)), int(time.time() - start))


def do_train_pgd():
    """Entry point: run PGD adversarial training."""
    datasets_pair = load_datasets()
    train_pgd(model, datasets_pair[0], datasets_pair[1], device, optimizer, criterion, batch_size)


def train_free_at(model, train_datasets, test_datasets, device, optimizer, criterion, batch_size: int = 64, epoch: int = 10):
    """Adversarial training with FreeAT (m replay steps per batch).

    Mirrors the PGD loop: clean backward pass, then `m` attack/replay steps on
    the embedding, with the clean grads restored on the final step.

    Fixes vs. original:
      * after the attack loop, call `free_at.restore()` to remove the
        perturbation from the embedding — the original called
        `restore_grad()` a second time (already called on the last loop
        iteration), leaving the embedding permanently perturbed.
        NOTE(review): assumes FreeAT exposes `restore()` analogous to
        PGD's — confirm against the `adversials` module;
      * the train DataLoader is shuffled;
      * `range(epoch)` instead of `range(epoch + 1)` (off-by-one);
      * train accuracy is measured on the clean forward pass;
      * scalars via `.item()` instead of deprecated `.cpu().data.numpy()`.
    """
    free_at = FreeAT(model)
    train_loader = data.DataLoader(train_datasets, batch_size=batch_size, shuffle=True)
    test_loader = data.DataLoader(test_datasets, batch_size=batch_size)
    for one_epoch in range(epoch):
        start = time.time()
        loss_sum = 0.0
        accu = 0.0
        model.train()
        for token_ids, label in tqdm(train_loader):
            token_ids = token_ids.to(device).long()
            label = label.to(device).long()

            outputs = model(token_ids)
            loss = criterion(outputs, label)
            loss.backward()

            # attack
            m = 5  # number of FreeAT replay steps per batch
            free_at.backup_grad()
            for step_i in range(m):
                free_at.attack(is_first_attack=(step_i == 0))
                if step_i != m - 1:
                    model.zero_grad()  # intermediate: only the perturbation grad matters
                else:
                    free_at.restore_grad()  # last step: accumulate onto the clean grads
                adv_outputs = model(token_ids)
                loss_adv = criterion(adv_outputs, label)
                loss_adv.backward()

            free_at.restore()  # remove the perturbation from the embedding
            optimizer.step()
            optimizer.zero_grad()

            loss_sum += loss.item()
            accu += (outputs.argmax(1) == label).sum().item()  # clean-pass accuracy

        test_loss_sum = 0.0
        test_accu = 0.0
        model.eval()
        for token_ids, label in tqdm(test_loader):
            token_ids = token_ids.to(device).long()
            label = label.to(device).long()
            with torch.no_grad():
                out = model(token_ids)
                loss = criterion(out, label)
                test_loss_sum += loss.item()
                test_accu += (out.argmax(1) == label).sum().item()
        print("epoch %d, train loss:%f, train acc:%f, test loss:%f, test acc:%f, use time:" % (
            one_epoch, loss_sum / len(train_datasets), accu / len(train_datasets), test_loss_sum / len(test_datasets),
            test_accu / len(test_datasets)), int(time.time() - start))


def do_train_free_at():
    """Entry point: run FreeAT adversarial training.

    Fix: the original called the plain `train(...)` loop here, so selecting
    this entry point never actually used FreeAT.
    """
    train_datasets, test_datasets = load_datasets()
    train_free_at(model, train_datasets, test_datasets, device, optimizer, criterion, batch_size)


if __name__ == "__main__":
    # Uncomment exactly one entry point to choose the training regime.
    # do_train()
    # do_train_fgsm()
    # do_train_fgm()
    # do_train_pgd()
    do_train_free_at()
