# -*- coding: utf-8 -*-
import numpy as np
import torch

from models.model_base import ModelBase
from models.base.init_utils import weights_init
from configs import *
from utils.network_utils import get_network
from utils.data_utils import get_dataloader
from pruner.pruning import *
from train_test import *
from copy import deepcopy
from utils import common_utils
from torch import autograd

def trans_train_prune(mb,net,config,trainloader,testloader):
    """Gradient-penalty pre-training, brief dense finetuning, then re-pruning.

    Three phases on the (currently masked) network ``net`` owned by ``mb``:
      1. For ``config.pre_epoch`` epochs, minimize the squared L2 norm of the
         cross-entropy gradient w.r.t. every Conv2d/Linear weight, re-applying
         the masks after each optimizer step.
      2. Remove the masks and finetune densely with SGD for 10 epochs.
      3. Re-prune: compute fresh masks with ``Pruner`` and register them on ``mb``.

    Relies on names brought in by the star imports at the top of the file
    (``nn``, ``optim``, ``test``, ``Pruner``, ``print_mask_information``).
    """
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(net.parameters(), lr=config.pre_lr)
    # Collect all prunable weight tensors once for autograd.grad below.
    weights = []
    for i in net.modules():
        if isinstance(i, nn.Conv2d) or isinstance(i, nn.Linear):
            weights.append(i.weight)
    for epoch in range(config.pre_epoch):
        net.train()
        correct = 0
        total = 0
        for batch_idx, (inputs, targets) in enumerate(trainloader):
            inputs, targets = inputs.cuda(), targets.cuda()
            optimizer.zero_grad()
            outputs = net(inputs)
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
            loss = criterion(outputs, targets)
            # First-order gradient with create_graph=True so the penalty
            # below is itself differentiable (second-order backward).
            _g = autograd.grad(loss, weights, create_graph=True)
            l = 0
            for i in _g:
                l += torch.pow(i, 2).sum()
            # Optimize the squared gradient norm, not the task loss itself.
            l.backward()
            optimizer.step()
            # Keep pruned weights at zero after the update.
            mb.apply_masks()
            if (batch_idx % 100 == 0):
                print('train[epoch:{}]-----acc:{:.2f}%'.format(epoch, correct / total * 100, ))
        loss, acc = test(net, testloader, criterion, epoch)

    mb.unregister_mask()

    # Short dense finetune with the masks removed (no apply_masks here).
    optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9)
    for epoch in range(10):
        net.train()
        for batch_idx, (inputs, targets) in enumerate(trainloader):
            inputs, targets = inputs.cuda(), targets.cuda()
            optimizer.zero_grad()
            outputs = net(inputs)
            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()

    # Re-prune the finetuned network and install the new masks.
    masks, _ = Pruner(mb, trainloader, 'cuda', config)
    mb.register_mask(masks)
    print_mask_information(mb)




def train_eval_loop_0(mb, trainloader, testloader, config):
    """Baseline masked training: SGD with cosine-annealed LR.

    After every epoch the total L1 norm of all Conv2d/Linear weights is
    recorded and the network is evaluated on the test set.  Results are
    written to 'ori_loss.npy', 'ori_acc.npy', 'ori_weight_norm.npy' and the
    final weights to 'ori_model.pth'.
    """
    model = mb.model
    epochs = config.epoch
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=config.learning_rate,
                          momentum=0.9, weight_decay=config.weight_decay)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=epochs)
    loss_history, acc_history, norm_history = [], [], []
    for ep in range(epochs):
        model.train()
        running_loss = 0
        hits = 0
        seen = 0
        current_lr = scheduler.get_last_lr()
        for step, (x, y) in enumerate(trainloader):
            x, y = x.cuda(), y.cuda()
            optimizer.zero_grad()
            logits = model(x)
            batch_loss = criterion(logits, y)
            batch_loss.backward()
            optimizer.step()
            running_loss += batch_loss.item() * y.size(0)
            _, pred = logits.max(1)
            seen += y.size(0)
            hits += pred.eq(y).sum().item()
            now_lr = scheduler.get_last_lr()
            if step % 100 == 0:
                print('train[epoch:{}]-----loss:{:.8f},acc:{:.2f}%,lr:{}'.format(ep, running_loss / seen, hits / seen * 100, now_lr))
            # Re-zero pruned weights after each step.
            mb.apply_masks()
        # Per-epoch L1 norm over all prunable layers.
        l1_total = 0
        for layer in model.modules():
            if isinstance(layer, (nn.Conv2d, nn.Linear)):
                l1_total += torch.norm(layer.weight.data, 1).sum().item()
        norm_history.append(l1_total)
        scheduler.step()
        ep_loss, ep_acc = test(model, testloader, criterion, ep)
        loss_history.append(ep_loss)
        acc_history.append(ep_acc)
    np.save('ori_loss.npy', loss_history)
    np.save('ori_acc.npy', acc_history)
    np.save('ori_weight_norm', norm_history)
    torch.save(model.state_dict(), 'ori_model.pth')

def train_eval_loop_1(mb, trainloader, testloader, config):
    """Masked training loop that, after every epoch, probes a single batch to
    record the squared gradient magnitude split between remaining (mask==1)
    and pruned (mask==0) weights.

    Per-epoch test loss/accuracy and the two gradient series are saved to
    .npy files, and the final weights to 'ori_model.pth'.
    """
    ori_loss = []
    ori_acc = []
    prune_mask=[]
    remain_mask=[]
    prune_grad=[]
    remain_grad=[]
    net = mb.model
    learning_rate = config.learning_rate
    weight_decay = config.weight_decay
    num_epochs = config.epoch
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9, weight_decay=weight_decay)
    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=num_epochs)
    # Snapshot the masks once: remain_mask keeps mask values as-is (1 = kept),
    # prune_mask is the complement -1*(m-1) == 1-m (1 = pruned).
    for _,m in mb.masks.items():
        prune_mask.append(torch.clone(-1*(m-1)).detach())
        remain_mask.append(torch.clone(m).detach())
    for epoch in range(num_epochs):
        net.train()
        train_loss = 0
        correct = 0
        total = 0
        lr = lr_scheduler.get_last_lr()
        for batch_idx, (inputs, targets) in enumerate(trainloader):
            inputs, targets = inputs.cuda(), targets.cuda()
            optimizer.zero_grad()
            outputs = net(inputs)
            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()
            train_loss += loss.item() * targets.size(0)
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
            _lr = lr_scheduler.get_last_lr()
            if (batch_idx % 100 == 0):
                print('train[epoch:{}]-----loss:{:.8f},acc:{:.2f}%,lr:{}'.format(epoch, train_loss / total,
                                                                                 correct / total * 100, _lr))
            # Re-zero pruned weights after each step.
            mb.apply_masks()

        # Gradient probe: one forward/backward on the FIRST batch only (note
        # the `break` below); no optimizer.step() is taken here.
        for batch_idx, (inputs, targets) in enumerate(trainloader):
            inputs, targets = inputs.cuda(), targets.cuda()
            optimizer.zero_grad()
            outputs = net(inputs)
            loss = criterion(outputs, targets)
            loss.backward()
            remain_g=0
            prune_g=0
            n=0
            total_num=0
            for i in net.modules():
                if isinstance(i, nn.Conv2d) or isinstance(i, nn.Linear):
                    total_num+=i.weight.data.numel()
                    # Squared gradient mass on kept vs. pruned positions;
                    # mask list order is assumed to match module order.
                    remain_g+=torch.pow(i.weight.grad*remain_mask[n],2).sum().item()
                    prune_g+=torch.pow(i.weight.grad*prune_mask[n],2).sum().item()
                    n+=1
            prune_grad.append(prune_g)
            remain_grad.append(remain_g)
            # Discard the probe gradients before the next training epoch.
            optimizer.zero_grad()
            mb.apply_masks()
            break

        lr_scheduler.step()
        loss, acc = test(net, testloader, criterion, epoch)
        ori_loss.append(loss)
        ori_acc.append(acc)
    np.save('ori_loss.npy', ori_loss)
    np.save('ori_acc.npy', ori_acc)
    np.save('remain_grad',remain_grad)
    np.save('prune_grad',prune_grad)
    # NOTE(review): total_num is only bound inside the probe loop -- this
    # raises NameError if num_epochs is 0 or the loader is empty.
    print(total_num)
    torch.save(net.state_dict(), 'ori_model.pth')

def train_eval_loop_2(mb, trainloader, testloader, config):
    """Masked training with mid-run re-pruning and a per-epoch gradient probe.

    At epochs 1 and 5 the network is re-pruned via ``trans_train_prune``
    (which replaces ``mb.masks``).  After every epoch the mask snapshots are
    rebuilt and one batch is used to record the squared gradient magnitude on
    remaining vs. pruned weights.

    Results go to 'ori_loss.npy', 'ori_acc.npy', 'prune_grad.npy',
    'remain_grad.npy' and the final weights to 'ori_model.pth'.
    """
    ori_loss=[]
    ori_acc=[]
    prune_grad=[]
    remain_grad=[]
    net = mb.model
    learning_rate = config.learning_rate
    weight_decay = config.weight_decay
    num_epochs = config.epoch
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9, weight_decay=weight_decay)
    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=num_epochs)
    # NOTE: the pre-loop mask snapshot the original code built here was dead
    # code -- the snapshots are (and must be) rebuilt after every epoch below,
    # because trans_train_prune can replace mb.masks.  It has been removed.
    for epoch in range(num_epochs):
        net.train()
        # Periodically re-prune the network mid-training.
        if epoch in [1,5]:
            trans_train_prune(mb,net,config,trainloader,testloader)
        train_loss = 0
        correct = 0
        total = 0
        for batch_idx, (inputs, targets) in enumerate(trainloader):
            inputs, targets = inputs.cuda(), targets.cuda()
            optimizer.zero_grad()
            outputs = net(inputs)
            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()
            train_loss += loss.item()*targets.size(0)
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
            _lr = lr_scheduler.get_last_lr()
            if(batch_idx%100==0):
                print('train[epoch:{}]-----loss:{:.8f},acc:{:.2f}%,lr:{}'.format(epoch,train_loss/total,correct/total*100,_lr))
            mb.apply_masks()

        lr_scheduler.step()
        loss,acc=test(net, testloader, criterion, epoch)
        ori_loss.append(loss)
        ori_acc.append(acc)
        # Fresh snapshot of the (possibly re-pruned) masks, keyed by module.
        # remain: 1 = kept weight; pruned: -1*(m-1) == 1-m, 1 = pruned weight.
        remain_mask = []
        pruned_mask = []
        for i in net.modules():
            if isinstance(i, nn.Conv2d) or isinstance(i, nn.Linear):
                remain_mask.append(mb.masks[i])
                pruned_mask.append(-1 * (mb.masks[i] - 1))
        # Probe one batch: squared gradient mass on kept vs. pruned weights.
        for batch_idx, (inputs, targets) in enumerate(trainloader):
            inputs, targets = inputs.cuda(), targets.cuda()
            optimizer.zero_grad()
            outputs = net(inputs)
            loss = criterion(outputs, targets)
            loss.backward()
            remain_g=0
            prune_g=0
            n = 0
            for i in net.modules():
                if isinstance(i, nn.Conv2d) or isinstance(i, nn.Linear):
                    remain_g+=torch.pow(torch.clone(i.weight.grad.detach())*remain_mask[n],2).sum().item()
                    prune_g += torch.pow(torch.clone(i.weight.grad.detach()) * pruned_mask[n], 2).sum().item()
                    n += 1
            prune_grad.append(prune_g)
            remain_grad.append(remain_g)
            # Discard the probe gradients; no step was taken.
            optimizer.zero_grad()
            break  # only the first batch is probed

    np.save('ori_loss.npy', ori_loss)
    np.save('ori_acc.npy', ori_acc)
    np.save('prune_grad',prune_grad)
    np.save('remain_grad',remain_grad)
    torch.save(net.state_dict(),'ori_model.pth')

def train_eval_loop_3(mb, trainloader, testloader, config):
    """Pre-train with a masked gradient penalty on conv weights, then train
    normally under the masks.

    ``transfer_mask`` complements each Conv2d mask per [out][in] filter
    slice: slices that are entirely zero (fully pruned) stay all-zero, every
    other slice becomes 1-m (1 at pruned positions inside kept slices).  The
    pre-training phase minimizes the batch-normalized squared gradient
    restricted to those positions.

    Bug fixed: the original indexed ``transfer_mask[n]`` with ``n`` taken
    from ``enumerate(mb.masks.items())``, which counts ALL masked layers
    (Linear included) while the list only grows for Conv2d layers -- any
    non-Conv2d mask ordered before a Conv2d one caused wrong indexing or an
    IndexError.  The freshly cloned tensor is now addressed directly.
    """
    ori_loss=[]
    ori_acc=[]
    net = mb.model
    learning_rate = config.learning_rate
    weight_decay = config.weight_decay
    num_epochs = config.epoch
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9, weight_decay=weight_decay)
    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=num_epochs)
    transfer_mask=[]
    for layer, mask in mb.masks.items():
        if isinstance(layer, nn.Conv2d):
            tm = torch.clone(mask).detach()
            for j in range(mask.shape[0]):
                for k in range(mask.shape[1]):
                    # Leave fully-pruned filter slices at zero.
                    if torch.equal(mask[j][k], torch.zeros_like(mask[j][k])):
                        continue
                    tm[j][k] = -1 * (tm[j][k] - 1)
            transfer_mask.append(tm)
    pre_optimizer = optim.SGD(net.parameters(), lr=config.pre_lr, momentum=0.9, weight_decay=weight_decay)

    # Conv weights in module order; assumed to match mb.masks iteration order
    # so that zip(_grad, transfer_mask) pairs correctly -- TODO confirm.
    weights = []
    for layer in net.modules():
        if isinstance(layer, nn.Conv2d):
            weights.append(layer.weight)

    for epoch in range(config.pre_epoch):
        net.train()
        for batch_idx, (inputs, targets) in enumerate(trainloader):
            inputs, targets = inputs.cuda(), targets.cuda()
            t = targets.size(0)
            pre_optimizer.zero_grad()
            outputs = net(inputs)
            loss = criterion(outputs, targets)
            # Differentiable first-order gradient for the penalty term.
            _grad = autograd.grad(loss, weights, create_graph=True)
            g = 0
            for i, j in zip(_grad, transfer_mask):
                g += (i * j).pow(2).sum()
            g = g / t  # normalize by batch size
            g.backward()
            pre_optimizer.step()
            mb.apply_masks()
        print(g)
        loss, acc = test(net, testloader, criterion, epoch)
    for epoch in range(num_epochs):
        net.train()
        train_loss = 0
        correct = 0
        total = 0
        for batch_idx, (inputs, targets) in enumerate(trainloader):
            inputs, targets = inputs.cuda(), targets.cuda()
            optimizer.zero_grad()
            outputs = net(inputs)
            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()
            train_loss += loss.item()*targets.size(0)
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
            _lr = lr_scheduler.get_last_lr()
            if(batch_idx%100==0):
                print('train[epoch:{}]-----loss:{:.8f},acc:{:.2f}%,lr:{}'.format(epoch,train_loss/total,correct/total*100,_lr))
            mb.apply_masks()
        lr_scheduler.step()
        loss,acc=test(net, testloader, criterion, epoch)
        ori_loss.append(loss)
        ori_acc.append(acc)
    np.save('ori_loss.npy', ori_loss)
    np.save('ori_acc.npy', ori_acc)
    torch.save(net.state_dict(),'ori_model.pth')

def train_eval_loop_4(mb, trainloader, testloader, config):
    """Pre-train with a gradient penalty restricted to FULLY-pruned conv
    filter slices, then train normally under the masks.

    Dual of train_eval_loop_3: in ``transfer_mask``, [out][in] filter slices
    that are entirely zero in the mask become all-ones (selected by the
    penalty), every other slice becomes all-zeros (ignored).

    Bug fixed: the original indexed ``transfer_mask[n]`` with ``n`` taken
    from ``enumerate(mb.masks.items())``, which counts ALL masked layers
    (Linear included) while the list only grows for Conv2d layers -- any
    non-Conv2d mask ordered before a Conv2d one caused wrong indexing or an
    IndexError.  The freshly cloned tensor is now addressed directly.
    """
    ori_loss=[]
    ori_acc=[]
    net = mb.model
    learning_rate = config.learning_rate
    weight_decay = config.weight_decay
    num_epochs = config.epoch
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9, weight_decay=weight_decay)
    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=num_epochs)
    transfer_mask=[]
    for layer, mask in mb.masks.items():
        if isinstance(layer, nn.Conv2d):
            tm = torch.clone(mask).detach()
            for j in range(mask.shape[0]):
                for k in range(mask.shape[1]):
                    if torch.equal(mask[j][k], torch.zeros_like(mask[j][k])):
                        # Fully-pruned slice: select it (zeros + 1 = ones).
                        tm[j][k] = (tm[j][k] + 1)
                    else:
                        # Partially-kept slice: exclude it from the penalty.
                        tm[j][k] = torch.zeros_like(tm[j][k])
            transfer_mask.append(tm)
    pre_optimizer = optim.SGD(net.parameters(), lr=config.pre_lr, momentum=0.9, weight_decay=weight_decay)

    # Conv weights in module order; assumed to match mb.masks iteration order
    # so that zip(_grad, transfer_mask) pairs correctly -- TODO confirm.
    weights = []
    for layer in net.modules():
        if isinstance(layer, nn.Conv2d):
            weights.append(layer.weight)

    for epoch in range(config.pre_epoch):
        net.train()
        for batch_idx, (inputs, targets) in enumerate(trainloader):
            inputs, targets = inputs.cuda(), targets.cuda()
            t = targets.size(0)
            pre_optimizer.zero_grad()
            outputs = net(inputs)
            loss = criterion(outputs, targets)
            # Differentiable first-order gradient for the penalty term.
            _grad = autograd.grad(loss, weights, create_graph=True)
            g = 0
            for i, j in zip(_grad, transfer_mask):
                g += (i * j).pow(2).sum()
            g = g / t  # normalize by batch size
            g.backward()
            pre_optimizer.step()
            mb.apply_masks()
        print(g)
        loss, acc = test(net, testloader, criterion, epoch)
    for epoch in range(num_epochs):
        net.train()
        train_loss = 0
        correct = 0
        total = 0
        for batch_idx, (inputs, targets) in enumerate(trainloader):
            inputs, targets = inputs.cuda(), targets.cuda()
            optimizer.zero_grad()
            outputs = net(inputs)
            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()
            train_loss += loss.item()*targets.size(0)
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
            _lr = lr_scheduler.get_last_lr()
            if(batch_idx%100==0):
                print('train[epoch:{}]-----loss:{:.8f},acc:{:.2f}%,lr:{}'.format(epoch,train_loss/total,correct/total*100,_lr))
            mb.apply_masks()
        lr_scheduler.step()
        loss,acc=test(net, testloader, criterion, epoch)
        ori_loss.append(loss)
        ori_acc.append(acc)
    np.save('ori_loss.npy', ori_loss)
    np.save('ori_acc.npy', ori_acc)
    torch.save(net.state_dict(),'ori_model.pth')

def train_eval_loop_5(mb, trainloader, testloader, config):
    """Masked training that alternates, batch by batch, between a normal
    cross-entropy SGD step and a gradient-penalty step (squared L2 norm of
    the loss gradient w.r.t. all Conv2d/Linear weights) taken with a 100x
    smaller learning rate.

    (Original Chinese comment, translated: "perform the feature transfer at
    every epoch and see how the performance changes.")

    Bug fixed: both branches were plain ``if``s, so after the first branch
    flipped ``singal`` to -1 the second branch ran in the SAME iteration and
    flipped it back -- every batch executed BOTH steps and the intended
    alternation never happened.  The second branch is now ``elif``.
    """
    ori_loss=[]
    ori_acc=[]
    net = mb.model
    learning_rate = config.learning_rate
    weight_decay = config.weight_decay
    num_epochs = config.epoch
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9, weight_decay=weight_decay)
    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=num_epochs)
    for epoch in range(num_epochs):
        net.train()
        train_loss = 0
        correct = 0
        total = 0
        # Prunable weights, gathered fresh each epoch for autograd.grad below.
        weights = []
        for i in net.modules():
            if isinstance(i, nn.Conv2d) or isinstance(i, nn.Linear):
                weights.append(i.weight)
        # Penalty optimizer: 1% of the current (cosine-annealed) LR.
        optimizer2 = optim.SGD(net.parameters(), lr=optimizer.state_dict()['param_groups'][0]['lr'] * 0.01, momentum=0.9, weight_decay=weight_decay)
        singal=1
        for batch_idx, (inputs, targets) in enumerate(trainloader):
            if singal==1:
                # Normal cross-entropy SGD step.
                inputs, targets = inputs.cuda(), targets.cuda()
                optimizer.zero_grad()
                outputs = net(inputs)
                loss = criterion(outputs, targets)
                loss.backward()
                optimizer.step()
                train_loss += loss.item()*targets.size(0)
                _, predicted = outputs.max(1)
                total += targets.size(0)
                correct += predicted.eq(targets).sum().item()
                singal=-1*singal
            elif singal==-1:
                # Gradient-penalty step, applied via optimizer2.
                inputs, targets = inputs.cuda(), targets.cuda()
                optimizer.zero_grad()
                outputs = net(inputs)
                _, predicted = outputs.max(1)
                total += targets.size(0)
                correct += predicted.eq(targets).sum().item()
                loss = criterion(outputs, targets)
                train_loss += loss.item() * targets.size(0)
                # create_graph=True so the penalty is itself differentiable.
                _g = autograd.grad(loss, weights, create_graph=True)
                l = 0
                for i in _g:
                    l += torch.pow(i, 2).sum()
                l.backward()
                optimizer2.step()
                singal=-1*singal
            _lr = lr_scheduler.get_last_lr()
            if(batch_idx%100==0):
                print('train[epoch:{}]-----loss:{:.8f},acc:{:.2f}%,lr:{}'.format(epoch,train_loss/total,correct/total*100,_lr))
            mb.apply_masks()
        lr_scheduler.step()
        loss,acc=test(net, testloader, criterion, epoch)
        ori_loss.append(loss)
        ori_acc.append(acc)
    np.save('ori_loss.npy', ori_loss)
    np.save('ori_acc.npy', ori_acc)
    torch.save(net.state_dict(),'ori_model.pth')

def train_eval_loop_6(mb, trainloader, testloader, config):
    """Gradient-drift measurement loop (no parameter updates --
    ``optimizer.step()`` is intentionally disabled).

    Tracks, per iteration, how much the current batch gradient differs from
    the previous flattened gradient, as sum(|g_t - g_prev|)/sum(|g_prev|),
    saving the series to 'delta_g.npy' after every iteration and
    hard-exiting via ``quit()`` after 1200 iterations.

    At iteration 0 the reference gradient is the full-dataset average:
    gradients accumulate over the whole loader, then are divided by the
    number of batches.  Generalized: the original divided by a hard-coded
    391 (= ceil(50000/128), CIFAR-10 specific); this now uses
    ``len(trainloader)`` so any dataset/batch size works.
    """
    ori_loss=[]
    ori_acc=[]
    weight_norm=[]
    net = mb.model
    learning_rate = config.learning_rate
    weight_decay = config.weight_decay
    num_epochs = config.epoch
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9, weight_decay=weight_decay)
    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=num_epochs)
    delta_g=[]
    iter_num=0
    for epoch in range(num_epochs):
        net.train()
        train_loss = 0
        correct = 0
        total = 0
        for batch_idx, (inputs, targets) in enumerate(trainloader):
            inputs, targets = inputs.cuda(), targets.cuda()
            optimizer.zero_grad()
            outputs = net(inputs)
            loss = criterion(outputs, targets)

            if iter_num==0:
                # Build the full-dataset average gradient as the reference.
                # NOTE: this inner loop shadows inputs/targets/outputs/loss,
                # so the bookkeeping below reflects the LAST inner batch.
                for b, (inputs, targets) in enumerate(trainloader):
                    inputs, targets = inputs.cuda(), targets.cuda()
                    outputs = net(inputs)
                    loss = criterion(outputs, targets)
                    loss.backward()  # grads accumulate across all batches
                grad = []
                num_batches = len(trainloader)
                for i in net.modules():
                    if isinstance(i, nn.Conv2d) or isinstance(i, nn.Linear):
                        grad.append(i.weight.grad/num_batches)
                g = torch.cat([(i).flatten() for i in grad], dim=0).to(torch.device('cpu'))
            else:
                loss.backward()
                grad = []
                for i in net.modules():
                    if isinstance(i, nn.Conv2d) or isinstance(i, nn.Linear):
                        grad.append(i.weight.grad)
                flat_g=torch.cat([(torch.clone(i).detach()).flatten() for i in grad],dim=0).to(torch.device('cpu'))
                # Relative L1 change against the previous gradient.
                delta_g.append(torch.abs(flat_g-g).sum().item()/torch.abs(g).sum().item())
                g=flat_g
                np.save('delta_g', delta_g)
            if iter_num==1200:
                quit()  # hard stop after 1200 measured iterations
            iter_num+=1

            # optimizer.step()  # intentionally disabled: measurement only
            train_loss += loss.item()*targets.size(0)
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
            _lr = lr_scheduler.get_last_lr()
            if(batch_idx%100==0):
                print('train[epoch:{}]-----loss:{:.8f},acc:{:.2f}%,lr:{}'.format(epoch,train_loss/total,correct/total*100,_lr))
            mb.apply_masks()
        # Per-epoch L1 norm of all prunable weights.
        norm=0
        for i in net.modules():
            if isinstance(i, nn.Conv2d) or isinstance(i, nn.Linear):
                norm+=(torch.norm(i.weight.data,1).sum().item())
        weight_norm.append(norm)
        lr_scheduler.step()
        loss,acc=test(net, testloader, criterion, epoch)
        ori_loss.append(loss)
        ori_acc.append(acc)
    np.save('ori_loss.npy', ori_loss)
    np.save('ori_acc.npy', ori_acc)
    np.save('ori_weight_norm', weight_norm)

    torch.save(net.state_dict(),'ori_model.pth')


def train_eval_loop_12(mb, trainloader, testloader, config):
    """Finetune a checkpoint loaded from a fixed Kaggle path with SGD and a
    cosine LR schedule, logging the total L1 norm of all Conv2d/Linear
    weights after every batch.

    NOTE(review): a large commented-out "penalty gradient" experiment was
    removed from this function.  ``penaty_grad``, ``yihao`` and ``delta_g``
    only existed for that experiment; they are kept (always empty) solely so
    the same .npy output files are still written.
    NOTE(review): unlike the other loops in this file, this one never calls
    mb.apply_masks(), so the finetune is effectively unmasked -- confirm
    that is intended.
    """
    penaty_grad=[]  # always empty; kept for output-file compatibility
    delta_g=[]      # always empty; kept for output-file compatibility
    ori_loss=[]
    ori_acc=[]
    weight_norm=[]
    yihao=[]        # always empty; kept for output-file compatibility
    net = mb.model
    # Hard-coded checkpoint path (Kaggle environment); strict=False tolerates
    # missing/unexpected state-dict keys.
    net.load_state_dict(torch.load('/kaggle/input/penaty-model/ori_model.pth'),strict=False)
    weight_decay = config.weight_decay
    num_epochs = config.epoch
    criterion = nn.CrossEntropyLoss()

    # NOTE(review): lr is hard-coded to 0.01; config.learning_rate is not
    # used by this loop -- confirm that is intended.
    optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9, weight_decay=weight_decay)
    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=num_epochs)
    for epoch in range(num_epochs):
        net.train()
        train_loss = 0
        correct = 0
        total = 0
        for batch_idx, (inputs, targets) in enumerate(trainloader):
            inputs, targets = inputs.cuda(), targets.cuda()
            optimizer.zero_grad()
            outputs = net(inputs)
            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()
            train_loss += loss.item()*targets.size(0)
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
            _lr = lr_scheduler.get_last_lr()
            if(batch_idx%100==0):
                print('train[epoch:{}]-----loss:{:.8f},acc:{:.2f}%,lr:{}'.format(epoch,train_loss/total,correct/total*100,_lr))
            # Per-batch L1 norm of all prunable weights.
            norm = 0
            for i in net.modules():
                if isinstance(i, nn.Conv2d) or isinstance(i, nn.Linear):
                    norm += (torch.norm(i.weight.data, 1).sum().item())
            weight_norm.append(norm)
        lr_scheduler.step()
        loss, acc = test(net, testloader, criterion, epoch)
        ori_loss.append(loss)
        ori_acc.append(acc)
    np.save('ori_loss.npy', ori_loss)
    np.save('ori_acc.npy', ori_acc)
    np.save('ori_weight_norm', weight_norm)
    np.save('penaty_grad',penaty_grad)
    np.save('yihao',yihao)
    np.save('delta_g',delta_g)