# -*- coding: utf-8 -*-
import numpy as np
import torch

from pruner.pruning import *
from train_test import *
from copy import deepcopy
from utils import common_utils
from torch import autograd

def record_grad_noise(mb,trainloader,epoch,config):
    """Measure per-batch gradient-noise norms and save them to
    'g_noise_norm<epoch>.npy'.

    Two passes over ``trainloader``:
      1. accumulate gradients over every batch to form the mean gradient
         of each conv/linear layer;
      2. for each batch, record ||g_mean - g_batch|| restricted to the
         remaining weights, the pruned weights, and all weights.

    mb          -- mask wrapper exposing .model, .masks and .apply_masks()
    trainloader -- training DataLoader (batches are moved to CUDA)
    epoch       -- used only to name the output file
    config      -- supplies learning_rate / weight_decay for the SGD
                   optimizer (never stepped; only used for zero_grad)
    """
    g_noise_norm=[[],[],[]]  # [remain, pruned, total] norms, one entry per batch
    prune_mask = []
    remain_mask = []
    r=0
    t=0
    for layer, m in mb.masks.items():
        r+=len(torch.nonzero(layer.weight.data))
        t+=layer.weight.data.numel()
        # m is 1 on kept weights, so -(m - 1) is 1 exactly on pruned weights
        prune_mask.append(torch.clone(-1 * (m - 1)).detach())
        remain_mask.append(torch.clone(m).detach())
    print('true remain ratio',r/t)
    net = mb.model
    net.eval()
    learning_rate = config.learning_rate
    weight_decay = config.weight_decay
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9, weight_decay=weight_decay)
    g_mean=[]
    for i in net.modules():
        if isinstance(i, nn.Conv2d) or isinstance(i, nn.Linear):
            g_mean.append(torch.zeros_like(i.weight.data))

    # Pass 1: let .grad accumulate over all batches (no zero_grad inside the
    # loop), then divide by the batch count to obtain the mean gradient.
    optimizer.zero_grad()  # FIX: drop stale gradients left over from training
    for batch_idx, (inputs, targets) in enumerate(trainloader):
        inputs, targets = inputs.cuda(), targets.cuda()
        outputs = net(inputs)
        loss = criterion(outputs, targets)
        loss.backward()
    n=0
    for i in net.modules():
        if isinstance(i, nn.Conv2d) or isinstance(i, nn.Linear):
            g_mean[n]+=torch.clone(i.weight.grad).detach()/len(trainloader)
            n+=1

    # Pass 2: per-batch gradient noise = deviation of the batch gradient
    # from the mean gradient, measured on remain / pruned / all weights.
    for batch_idx, (inputs, targets) in enumerate(trainloader):
        inputs, targets = inputs.cuda(), targets.cuda()
        optimizer.zero_grad()
        outputs = net(inputs)
        loss = criterion(outputs, targets)
        loss.backward()
        n=r=p=t=0
        for i in net.modules():
            if isinstance(i, nn.Conv2d) or isinstance(i, nn.Linear):
                r+=torch.norm((g_mean[n]-i.weight.grad)*remain_mask[n]).item()
                p += torch.norm((g_mean[n] - i.weight.grad) * prune_mask[n]).item()
                t += torch.norm((g_mean[n] - i.weight.grad)).item()
                n+=1
        g_noise_norm[0].append(r)
        g_noise_norm[1].append(p)
        g_noise_norm[2].append(t)
        mb.apply_masks()
    r = 0
    t = 0
    for layer, m in mb.masks.items():
        r += len(torch.nonzero(layer.weight.data))
        t += layer.weight.data.numel()
    print('true remain ratio', r / t)
    np.save('g_noise_norm'+str(epoch)+'.npy',g_noise_norm)
    net.train()  # restore training mode for the caller

# Module-level accumulators shared across calls to record_noise_mean_for_epoch:
# one sub-list each for the [remain, pruned, total] series, appended per epoch.
g_mean_epoch=[[],[],[]]
noise_mean_epoch=[[],[],[]]
def record_noise_mean_for_epoch(mb,trainloader,epoch,config):
    """Record, for one epoch, the norm of the mean gradient and the mean
    gradient-noise norm on remaining / pruned / all weights, appending to
    the module-level ``g_mean_epoch`` and ``noise_mean_epoch`` accumulators
    and saving both arrays (suffixed with config.exp_name) to disk.

    mb          -- mask wrapper exposing .model, .masks and .apply_masks()
    trainloader -- training DataLoader (batches are moved to CUDA)
    epoch       -- unused except for call-site symmetry with record_grad_noise
    config      -- supplies learning_rate / weight_decay / exp_name
    """
    g_noise_norm=[[],[],[]]  # [remain, pruned, total] per-batch noise norms
    prune_mask = []
    remain_mask = []
    r=0
    t=0
    for layer, m in mb.masks.items():
        r+=len(torch.nonzero(layer.weight.data))
        t+=layer.weight.data.numel()
        # m is 1 on kept weights, so -(m - 1) is 1 exactly on pruned weights
        prune_mask.append(torch.clone(-1 * (m - 1)).detach())
        remain_mask.append(torch.clone(m).detach())
    print('true remain ratio',r/t)
    net = mb.model
    net.eval()  # FIX: match record_grad_noise; net.train() is restored below
    learning_rate = config.learning_rate
    weight_decay = config.weight_decay
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9, weight_decay=weight_decay)
    g_mean=[]
    for i in net.modules():
        if isinstance(i, nn.Conv2d) or isinstance(i, nn.Linear):
            g_mean.append(torch.zeros_like(i.weight.data))

    # Pass 1: accumulate .grad over all batches (no zero_grad in the loop),
    # then average to get the mean gradient per layer.
    optimizer.zero_grad()  # FIX: drop stale gradients before accumulating
    for batch_idx, (inputs, targets) in enumerate(trainloader):
        inputs, targets = inputs.cuda(), targets.cuda()
        outputs = net(inputs)
        loss = criterion(outputs, targets)
        loss.backward()
    n=0
    r=p=t=0
    for i in net.modules():
        if isinstance(i, nn.Conv2d) or isinstance(i, nn.Linear):
            g_mean[n]+=torch.clone(i.weight.grad).detach()/len(trainloader)
            r+=torch.norm(torch.clone(i.weight.grad).detach()*remain_mask[n]/len(trainloader)).item()
            p+=torch.norm(torch.clone(i.weight.grad).detach() * prune_mask[n] / len(trainloader)).item()
            t+=torch.norm(torch.clone(i.weight.grad).detach()/ len(trainloader)).item()
            n+=1
    g_mean_epoch[0].append(r)
    g_mean_epoch[1].append(p)
    g_mean_epoch[2].append(t)
    np.save('g_mean_epoch_'+config.exp_name,g_mean_epoch)

    # Pass 2: per-batch noise = deviation of the batch gradient from g_mean.
    for batch_idx, (inputs, targets) in enumerate(trainloader):
        inputs, targets = inputs.cuda(), targets.cuda()
        optimizer.zero_grad()
        outputs = net(inputs)
        loss = criterion(outputs, targets)
        loss.backward()
        n=r=p=t=0
        for i in net.modules():
            if isinstance(i, nn.Conv2d) or isinstance(i, nn.Linear):
                r+=torch.norm((g_mean[n]-i.weight.grad)*remain_mask[n]).item()
                p += torch.norm((g_mean[n] - i.weight.grad) * prune_mask[n]).item()
                t += torch.norm((g_mean[n] - i.weight.grad)).item()
                n+=1
        g_noise_norm[0].append(r)
        g_noise_norm[1].append(p)
        g_noise_norm[2].append(t)
        mb.apply_masks()
    noise_mean_epoch[0].append(np.mean(g_noise_norm[0]))
    noise_mean_epoch[1].append(np.mean(g_noise_norm[1]))
    noise_mean_epoch[2].append(np.mean(g_noise_norm[2]))
    np.save('noise_mean_epoch_'+config.exp_name,noise_mean_epoch)
    r = 0
    t = 0
    for layer, m in mb.masks.items():
        r += len(torch.nonzero(layer.weight.data))
        t += layer.weight.data.numel()
    print('true remain ratio', r / t)
    # np.save('g_noise_norm'+str(epoch)+'.npy',g_noise_norm)
    net.train()  # restore training mode for the caller

def train_eval_loop_0(mb, trainloader, testloader, config):
    """Baseline masked training loop.

    Trains mb.model for config.epoch epochs with SGD (momentum 0.9) and a
    cosine-annealing schedule, re-applying the pruning masks after every
    optimizer step, testing after every epoch, and saving loss/accuracy
    curves, per-epoch train loss, L1 weight norms, and the final weights.
    """
    ori_loss=[]
    ori_acc=[]
    train_loss_record=[]
    weight_norm=[]
    net = mb.model
    learning_rate = config.learning_rate
    weight_decay = config.weight_decay
    num_epochs = config.epoch
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9, weight_decay=weight_decay)
    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=num_epochs)
    for epoch in range(num_epochs):
        net.train()
        train_loss = 0
        correct = 0
        total = 0
        lr = lr_scheduler.get_last_lr()  # NOTE(review): unused; _lr below is what gets printed
        for batch_idx, (inputs, targets) in enumerate(trainloader):
            inputs, targets = inputs.cuda(), targets.cuda()
            optimizer.zero_grad()
            outputs = net(inputs)
            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()
            train_loss += loss.item()*targets.size(0)  # sample-weighted loss sum
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
            _lr = lr_scheduler.get_last_lr()
            if(batch_idx%100==0):
                print('train[epoch:{}]-----loss:{:.8f},acc:{:.2f}%,lr:{}'.format(epoch,train_loss/total,correct/total*100,_lr))
            mb.apply_masks()  # re-zero pruned weights touched by the step above
        norm=0
        train_loss_record.append(train_loss/total)
        # L1 norm of all conv/linear weights, tracked per epoch
        for i in net.modules():
            if isinstance(i, nn.Conv2d) or isinstance(i, nn.Linear):
                norm+=(torch.norm(i.weight.data,1).sum().item())
        weight_norm.append(norm)
        lr_scheduler.step()
        loss,acc=test(net, testloader, criterion, epoch)
        ori_loss.append(loss)
        ori_acc.append(acc)
    np.save('ori_loss.npy', ori_loss)
    np.save('ori_acc.npy', ori_acc)
    np.save('train_loss',train_loss_record)
    np.save('ori_weight_norm', weight_norm)
    torch.save(net.state_dict(),'ori_model.pth')

def train_eval_loop_1(mb, trainloader, testloader, config):
    """Masked training loop that also logs gradient-noise statistics
    (record_noise_mean_for_epoch) at the start of every epoch; saves test
    loss/accuracy curves and the final weights.
    """
    ori_loss=[]
    ori_acc=[]
    mb.apply_masks()  # make sure pruned weights are zeroed before training starts
    net = mb.model
    learning_rate = config.learning_rate
    weight_decay = config.weight_decay
    num_epochs = config.epoch
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9, weight_decay=weight_decay)
    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=num_epochs)
    for epoch in range(num_epochs):

        record_noise_mean_for_epoch(mb,trainloader,epoch,config)
        net.train()
        train_loss = 0
        correct = 0
        total = 0
        lr = lr_scheduler.get_last_lr()  # NOTE(review): unused; _lr below is what gets printed
        for batch_idx, (inputs, targets) in enumerate(trainloader):
            inputs, targets = inputs.cuda(), targets.cuda()
            optimizer.zero_grad()
            outputs = net(inputs)
            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()
            train_loss += loss.item()*targets.size(0)  # sample-weighted loss sum
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
            _lr = lr_scheduler.get_last_lr()
            if(batch_idx%100==0):
                print('train[epoch:{}]-----loss:{:.8f},acc:{:.2f}%,lr:{}'.format(epoch,train_loss/total,correct/total*100,_lr))
            mb.apply_masks()  # re-zero pruned weights touched by the step above
        lr_scheduler.step()
        loss,acc=test(net, testloader, criterion, epoch)
        ori_loss.append(loss)
        ori_acc.append(acc)
    np.save('ori_loss.npy', ori_loss)
    np.save('ori_acc.npy', ori_acc)

    torch.save(net.state_dict(),'ori_model.pth')

def train_eval_loop_5(mb, trainloader, testloader, config):
    """Two-phase training: first pre-train for config.pre_epoch epochs by
    minimising the mean squared batch gradient on the pruned weights (a
    gradient-penalty objective — the task loss itself is NOT stepped in this
    phase), then train normally for config.epoch epochs with masks re-applied
    after every step. Saves test loss/accuracy to 'ori_loss.npy'/'ori_acc.npy'.
    """
    ori_loss=[]
    ori_acc=[]
    weight_norm=[]
    net = mb.model
    learning_rate = config.learning_rate
    weight_decay = config.weight_decay
    num_epochs = config.epoch
    criterion = nn.CrossEntropyLoss()
    pre_epoch = config.pre_epoch
    pre_learning_rate =config.pre_learning_rate
    optimizer = optim.SGD(net.parameters(), lr=pre_learning_rate, momentum=0.9, weight_decay=weight_decay)
    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=num_epochs)
    mask = []
    prune_mask = []
    for i in net.modules():
        if isinstance(i, nn.Conv2d) or isinstance(i, nn.Linear):
            mask.append(torch.clone(mb.masks[i]).detach())
            # mask is 1 on kept weights; abs(-(mask - 1)) is 1 on pruned weights
            prune_mask.append(torch.abs(torch.clone(-1 * (mb.masks[i] - 1))).detach())

    mask_compare = []
    prune_mask_compare = []
    for i in net.modules():
        if isinstance(i, nn.Conv2d) or isinstance(i, nn.Linear):
            mask_compare.append(torch.clone(mb.masks_compare[i]).detach())
            prune_mask_compare.append(torch.clone(torch.abs(-1 * (mb.masks_compare[i] - 1)).detach()))


    # print("prune")
    # print("len", len(prune_mask_compare))
    # print("len", len(prune_mask))
    # 1 where the two prunings disagree (currently unused below)
    prune_mask_sub = []
    for i in range(len(prune_mask_compare)):
        prune_mask_sub.append(torch.where(prune_mask_compare[i] == prune_mask[i], 0, 1))

    weights = []
    for layer in net.modules():
        if isinstance(layer, nn.Conv2d) or isinstance(layer, nn.Linear):
            weights.append(layer.weight)

    # Phase 1: gradient-penalty pre-training.
    for epoch in range(pre_epoch):
        net.train()
        train_loss = 0
        correct = 0
        total = 0
        lr = lr_scheduler.get_last_lr()  # NOTE(review): these four locals are unused in this phase
        for batch_idx, (inputs, targets) in enumerate(trainloader):
            inputs, targets = inputs.cuda(), targets.cuda()
            optimizer.zero_grad()
            outputs = net(inputs)
            loss = criterion(outputs, targets)
            # create_graph=True so the penalty below is differentiable (double backprop)
            _grad = autograd.grad(loss, weights, create_graph=True)

            # g = torch.tensor(0.0, requires_grad=True)
            g = 0
            for i,j in zip(_grad, prune_mask):
                g += (i*j).pow(2).mean()


            # print("g.type", type(g))
            print("=================g=====================",g)
            # Only the penalty g is minimised here, not the task loss.
            g.backward()
            optimizer.step()
            # weights.clear()
            # for layer in net.modules():
            #     if isinstance(layer, nn.Conv2d) or isinstance(layer, nn.Linear):
            #         weights.append(layer.weight)

            mb.apply_masks()
        lr_scheduler.step()
        print(g)
        loss,acc=test(net, testloader, criterion, epoch)
    print("--------------------------finish pre-training-------------------------------------")
    # Phase 2: standard cross-entropy training with a fresh optimizer/schedule.
    optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9, weight_decay=weight_decay)
    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=num_epochs)
    for epoch in range(num_epochs):
        # record_noise_mean_for_epoch(mb,trainloader,epoch,config)
        net.train()
        train_loss = 0
        correct = 0
        total = 0
        lr = lr_scheduler.get_last_lr()
        for batch_idx, (inputs, targets) in enumerate(trainloader):
            inputs, targets = inputs.cuda(), targets.cuda()
            optimizer.zero_grad()
            outputs = net(inputs)
            loss = criterion(outputs, targets)

            # _grad = autograd.grad(loss, weights, create_graph=True)
            # gp_loss= 0
            # for i, j in zip(_grad, prune_mask):
            #     gp_loss += (i * j).mean()
            # loss = loss + gp_loss

            loss.backward()
            optimizer.step()
            train_loss += loss.item()*targets.size(0)
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
            _lr = lr_scheduler.get_last_lr()
            if(batch_idx%100==0):
                print('train[epoch:{}]-----loss:{:.8f},acc:{:.2f}%,lr:{}'.format(epoch,train_loss/total,correct/total*100,_lr))
            mb.apply_masks()
        norm=0
        for i in net.modules():
            if isinstance(i, nn.Conv2d) or isinstance(i, nn.Linear):
                norm+=(torch.norm(i.weight.data,1).sum().item())
        weight_norm.append(norm)
        lr_scheduler.step()
        loss,acc=test(net, testloader, criterion, epoch)
        ori_loss.append(loss)
        ori_acc.append(acc)

    np.save('ori_loss.npy', ori_loss)
    np.save('ori_acc.npy', ori_acc)


def train_eval_loop_6(mb, trainloader, testloader, config):
    """Warm up for 5 epochs by minimising the gradient-noise norm on the
    pruned weights (||(g_batch - g_mean) * prune_mask||_2), then run the
    standard masked training loop for config.epoch epochs, logging noise
    statistics each epoch and saving test loss/accuracy curves.
    """
    ori_loss=[]
    ori_acc=[]
    weight_norm=[]
    net = mb.model
    learning_rate = config.learning_rate
    weight_decay = config.weight_decay
    num_epochs = config.epoch
    criterion = nn.CrossEntropyLoss()
    # fixed lr=0.01 for the warm-up phase; the configured lr is used afterwards
    optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9, weight_decay=weight_decay)
    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=num_epochs)
    mask = []
    prune_mask = []
    for i in net.modules():
        if isinstance(i, nn.Conv2d) or isinstance(i, nn.Linear):
            mask.append(torch.clone(mb.masks[i]).detach())
            # mask is 1 on kept weights; -(mask - 1) is 1 on pruned weights
            prune_mask.append(torch.clone(-1 * (mb.masks[i] - 1)).detach())
    weights = []
    for layer in net.modules():
        if isinstance(layer, nn.Conv2d) or isinstance(layer, nn.Linear):
            weights.append(layer.weight)

    for epoch in range(5):
        # record g_mean: accumulate .grad over the whole loader, then average
        g_mean = []
        optimizer.zero_grad()  # FIX: clear stale grads left by the previous epoch's last step
        for batch_idx, (inputs, targets) in enumerate(trainloader):
            inputs, targets = inputs.cuda(), targets.cuda()
            outputs = net(inputs)
            loss = criterion(outputs, targets)
            loss.backward()
        for i in net.modules():
            if isinstance(i, nn.Conv2d) or isinstance(i, nn.Linear):
                g_mean.append(torch.clone(i.weight.grad).detach() / len(trainloader))

        net.train()
        for batch_idx, (inputs, targets) in enumerate(trainloader):
            inputs, targets = inputs.cuda(), targets.cuda()
            optimizer.zero_grad()
            outputs = net(inputs)
            loss = criterion(outputs, targets)
            # create_graph=True makes the noise norm differentiable (double backprop)
            _grad = autograd.grad(loss, weights, create_graph=True)
            g=0
            n=0
            for i,j in zip(_grad,prune_mask):
                g+=torch.norm((i-g_mean[n])*j,2)
                n+=1
            g.backward()
            optimizer.step()
            mb.apply_masks()
        lr_scheduler.step()
        print(g)
        loss,acc=test(net, testloader, criterion, epoch)

    # main phase: standard cross-entropy training with fresh optimizer/schedule
    optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9, weight_decay=weight_decay)
    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=num_epochs)
    for epoch in range(num_epochs):
        record_noise_mean_for_epoch(mb,trainloader,epoch,config)
        net.train()
        train_loss = 0
        correct = 0
        total = 0
        for batch_idx, (inputs, targets) in enumerate(trainloader):
            inputs, targets = inputs.cuda(), targets.cuda()
            optimizer.zero_grad()
            outputs = net(inputs)
            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()
            train_loss += loss.item()*targets.size(0)
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
            _lr = lr_scheduler.get_last_lr()
            if(batch_idx%100==0):
                print('train[epoch:{}]-----loss:{:.8f},acc:{:.2f}%,lr:{}'.format(epoch,train_loss/total,correct/total*100,_lr))
            mb.apply_masks()
        norm=0
        for i in net.modules():
            if isinstance(i, nn.Conv2d) or isinstance(i, nn.Linear):
                norm+=(torch.norm(i.weight.data,1).sum().item())
        weight_norm.append(norm)
        lr_scheduler.step()
        loss,acc=test(net, testloader, criterion, epoch)
        ori_loss.append(loss)
        ori_acc.append(acc)

    np.save('ori_loss.npy', ori_loss)
    np.save('ori_acc.npy', ori_acc)



def train_eval_loop_4(mb, trainloader, testloader, config):
    """Train on a fixed random 90% subset of CIFAR-100 and, every 40 training
    batches, sweep the full ``trainloader`` to record its mean loss into
    'hat_loss.npy'. Also saves the usual loss/accuracy/norm curves and the
    final model weights.
    """
    import torchvision
    import torchvision.transforms as transforms
    root = '../Data'
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)),
    ])

    from torch.utils.data import SubsetRandomSampler
    trainset = torchvision.datasets.CIFAR100(root=root + '/cifar-100-python', train=True, download=True,
                                             transform=transform_train)
    # the 90%-subset indices are drawn once and reused for the whole run
    indices = np.arange(len(trainset))
    num_samples = int(0.9 * len(trainset))
    random_samples = np.random.choice(indices, size=num_samples, replace=False)
    trainloader_p = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=False,
                                                sampler=SubsetRandomSampler(random_samples))

    record_loss = []
    ori_loss = []
    ori_acc = []
    train_loss_record = []
    weight_norm = []
    net = mb.model
    learning_rate = config.learning_rate
    weight_decay = config.weight_decay
    num_epochs = config.epoch
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9, weight_decay=weight_decay)
    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=num_epochs)
    for epoch in range(num_epochs):
        net.train()
        train_loss = 0
        correct = 0
        total = 0
        for batch_idx, (inputs, targets) in enumerate(trainloader_p):
            inputs, targets = inputs.cuda(), targets.cuda()

            optimizer.zero_grad()
            outputs = net(inputs)
            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()
            train_loss += loss.item()*targets.size(0)  # sample-weighted loss sum
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
            _lr = lr_scheduler.get_last_lr()
            if(batch_idx%100==0):
                print('train[epoch:{}]-----loss:{:.8f},acc:{:.2f}%,lr:{}'.format(epoch,train_loss/total,correct/total*100,_lr))

            # mb.apply_masks()
            if batch_idx%40==0:
                # FIX: the inner sweep previously reused batch_idx/inputs/
                # targets/loss, shadowing the outer loop's variables; it also
                # called optimizer.zero_grad() under no_grad for no effect.
                with torch.no_grad():
                    lo = 0
                    num = len(trainloader)
                    for val_inputs, val_targets in trainloader:
                        val_inputs, val_targets = val_inputs.cuda(), val_targets.cuda()
                        val_outputs = net(val_inputs)
                        lo += criterion(val_outputs, val_targets).item()
                    record_loss.append(lo / num)
        print(total)
        np.save('hat_loss', record_loss)

        norm = 0
        train_loss_record.append(train_loss/total)
        # L1 norm of all conv/linear weights, tracked per epoch
        for i in net.modules():
            if isinstance(i, nn.Conv2d) or isinstance(i, nn.Linear):
                norm += (torch.norm(i.weight.data, 1).sum().item())
        weight_norm.append(norm)
        lr_scheduler.step()
        loss, acc = test(net, testloader, criterion, epoch)
        ori_loss.append(loss)
        ori_acc.append(acc)
    np.save('ori_loss.npy', ori_loss)
    np.save('ori_acc.npy', ori_acc)
    np.save('train_loss', train_loss_record)
    np.save('ori_weight_norm', weight_norm)
    torch.save(net.state_dict(), 'ori_model.pth')

def train_eval_loop_10(mb, trainloader, testloader, config):
    """Plain (unmasked) training loop: train mb.model for config.epoch
    epochs with SGD + cosine annealing, evaluate after every epoch, and dump
    loss/accuracy curves, per-epoch train loss, L1 weight norms and the
    final model weights to disk.
    """
    net = mb.model
    num_epochs = config.epoch
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=config.learning_rate,
                          momentum=0.9, weight_decay=config.weight_decay)
    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=num_epochs)

    ori_loss, ori_acc = [], []
    train_loss_record, weight_norm = [], []
    for epoch in range(num_epochs):
        net.train()
        running_loss, n_correct, n_seen = 0, 0, 0
        for batch_idx, (inputs, targets) in enumerate(trainloader):
            inputs, targets = inputs.cuda(), targets.cuda()
            optimizer.zero_grad()
            outputs = net(inputs)
            batch_loss = criterion(outputs, targets)
            batch_loss.backward()
            optimizer.step()
            # sample-weighted running loss and accuracy bookkeeping
            running_loss += batch_loss.item() * targets.size(0)
            n_seen += targets.size(0)
            n_correct += outputs.max(1)[1].eq(targets).sum().item()
            if batch_idx % 100 == 0:
                print('train[epoch:{}]-----loss:{:.8f},acc:{:.2f}%,lr:{}'.format(
                    epoch, running_loss / n_seen, n_correct / n_seen * 100,
                    lr_scheduler.get_last_lr()))
        train_loss_record.append(running_loss / n_seen)
        # L1 norm of all conv/linear weights, tracked per epoch
        l1_norm = sum(torch.norm(m.weight.data, 1).sum().item()
                      for m in net.modules()
                      if isinstance(m, (nn.Conv2d, nn.Linear)))
        weight_norm.append(l1_norm)
        lr_scheduler.step()
        loss, acc = test(net, testloader, criterion, epoch)
        ori_loss.append(loss)
        ori_acc.append(acc)
    np.save('ori_loss.npy', ori_loss)
    np.save('ori_acc.npy', ori_acc)
    np.save('train_loss', train_loss_record)
    np.save('ori_weight_norm', weight_norm)
    torch.save(net.state_dict(), 'ori_model.pth')


def train_eval_loop_11(mb, trainloader, testloader, config):
    """Unmasked training loop that additionally tracks, every 100 batches,
    the total L1 distance between the current conv/linear weights and a
    reference checkpoint loaded from 'ori_model.pth' ('dis.npy').
    Saves the usual loss/accuracy/norm curves.
    """

    ori_loss=[]
    ori_acc=[]
    train_loss_record=[]
    weight_norm=[]
    net = mb.model

    # frozen copy of the reference model for the weight-distance measurements
    ori_net=deepcopy(net)
    ori_net.load_state_dict(torch.load('ori_model.pth'))

    learning_rate = config.learning_rate
    weight_decay = config.weight_decay
    num_epochs = config.epoch
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9, weight_decay=weight_decay)
    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=num_epochs)

    dis=[]  # L1 distance to ori_net, sampled every 100 batches

    for epoch in range(num_epochs):
        net.train()
        train_loss = 0
        correct = 0
        total = 0
        lr = lr_scheduler.get_last_lr()  # NOTE(review): unused; _lr below is what gets printed
        for batch_idx, (inputs, targets) in enumerate(trainloader):
            inputs, targets = inputs.cuda(), targets.cuda()
            optimizer.zero_grad()
            outputs = net(inputs)
            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()
            train_loss += loss.item()*targets.size(0)
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
            _lr = lr_scheduler.get_last_lr()
            if(batch_idx%100==0):
                print('train[epoch:{}]-----loss:{:.8f},acc:{:.2f}%,lr:{}'.format(epoch,train_loss/total,correct/total*100,_lr))
                # pairwise walk over both models' modules (same architecture)
                value=0
                for i,j in zip(net.modules(),ori_net.modules()):
                    if isinstance(i,nn.Linear) or isinstance(i,nn.Conv2d):
                        value+=torch.norm(i.weight.data-j.weight.data,1).item()
                dis.append(value)
            # mb.apply_masks()
        norm=0
        train_loss_record.append(train_loss/total)
        # L1 norm of all conv/linear weights, tracked per epoch
        for i in net.modules():
            if isinstance(i, nn.Conv2d) or isinstance(i, nn.Linear):
                norm+=(torch.norm(i.weight.data,1).sum().item())
        weight_norm.append(norm)
        lr_scheduler.step()
        loss,acc=test(net, testloader, criterion, epoch)
        ori_loss.append(loss)
        ori_acc.append(acc)
    np.save('ori_loss.npy', ori_loss)
    np.save('ori_acc.npy', ori_acc)
    np.save('train_loss',train_loss_record)
    np.save('ori_weight_norm', weight_norm)
    np.save('dis',dis)
    # torch.save(net.state_dict(),'ori_model.pth')

def train_eval_loop_2(mb, trainloader, testloader, config):
    """Train on CIFAR-100, drawing batches from a fixed random 30% subset
    during the epoch window ((stage-0.1)*180, stage*180) and from the full
    shuffled set otherwise; afterwards print the mean training loss over the
    whole set and save loss/accuracy/norm curves plus the model weights.
    """
    import torchvision
    import torchvision.transforms as transforms
    stage=config.stage
    root = '../Data'
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)),
    ])

    from torch.utils.data import SubsetRandomSampler
    trainset = torchvision.datasets.CIFAR100(root=root + '/cifar-100-python', train=True, download=True,
                                             transform=transform_train)
    # the 30%-subset indices are drawn once and reused for the whole run
    indices = np.arange(len(trainset))
    num_samples = int(0.3 * len(trainset))
    random_samples = np.random.choice(indices, size=num_samples, replace=False)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=False,
                                              sampler=SubsetRandomSampler(random_samples))

    ori_loss=[]
    ori_acc=[]
    train_loss_record=[]
    weight_norm=[]
    net = mb.model
    learning_rate = config.learning_rate
    weight_decay = config.weight_decay
    num_epochs = config.epoch
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9, weight_decay=weight_decay)
    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=num_epochs)
    for epoch in range(num_epochs):

        # NOTE(review): the subset window assumes a 180-epoch schedule, not
        # num_epochs — confirm 180 matches config.epoch in the experiments.
        if epoch > (stage-0.1)*180 and epoch < (stage)*180:
            trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=False,
                                                      sampler=SubsetRandomSampler(random_samples))
        else:
            trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True)

        net.train()
        train_loss = 0
        correct = 0
        total = 0
        for batch_idx, (inputs, targets) in enumerate(trainloader):
            inputs, targets = inputs.cuda(), targets.cuda()
            optimizer.zero_grad()
            outputs = net(inputs)
            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()
            train_loss += loss.item()*targets.size(0)  # sample-weighted loss sum
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
            _lr = lr_scheduler.get_last_lr()
            if(batch_idx%100==0):
                print('train[epoch:{}]-----loss:{:.8f},acc:{:.2f}%,lr:{}'.format(epoch,train_loss/total,correct/total*100,_lr))
            # mb.apply_masks()
        norm=0
        train_loss_record.append(train_loss/total)
        # L1 norm of all conv/linear weights, tracked per epoch
        for i in net.modules():
            if isinstance(i, nn.Conv2d) or isinstance(i, nn.Linear):
                norm+=(torch.norm(i.weight.data,1).sum().item())
        weight_norm.append(norm)
        lr_scheduler.step()
        loss,acc=test(net, testloader, criterion, epoch)
        ori_loss.append(loss)
        ori_acc.append(acc)

    # Final pass: mean training loss over the full set. FIX: run under
    # torch.no_grad() (no autograd graph is needed — values are unchanged)
    # and drop the pointless per-batch optimizer.zero_grad().
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True)
    train_loss = 0
    num = 0
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(trainloader):
            inputs, targets = inputs.cuda(), targets.cuda()
            outputs = net(inputs)
            loss = criterion(outputs, targets)
            train_loss += loss.item()
            num += 1
    print(f'result loss: {train_loss/num}')


    np.save('ori_loss.npy', ori_loss)
    np.save('ori_acc.npy', ori_acc)
    np.save('train_loss',train_loss_record)
    np.save('ori_weight_norm', weight_norm)
    torch.save(net.state_dict(),'ori_model.pth')




def train_eval_loop_7(mb, trainloader, testloader, config):
    """Masked training with a 5-epoch low-lr (0.0002) warm-up on the plain
    cross-entropy loss, then config.epoch epochs of normal training; logs
    gradient-noise stats every epoch of the main phase and saves test
    loss/accuracy curves.
    """
    ori_loss=[]
    ori_acc=[]
    weight_norm=[]
    net = mb.model
    learning_rate = config.learning_rate
    weight_decay = config.weight_decay
    num_epochs = config.epoch
    criterion = nn.CrossEntropyLoss()
    # fixed small lr for the warm-up phase; the configured lr is used afterwards
    optimizer = optim.SGD(net.parameters(), lr=0.0002, momentum=0.9, weight_decay=weight_decay)
    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=num_epochs)
    mask = []
    prune_mask = []
    for i in net.modules():
        if isinstance(i, nn.Conv2d) or isinstance(i, nn.Linear):
            mask.append(torch.clone(mb.masks[i]).detach())
            # mask is 1 on kept weights; -(mask - 1) is 1 on pruned weights
            prune_mask.append(torch.clone(-1 * (mb.masks[i] - 1)).detach())

    mask_compare = []
    prune_mask_compare = []
    for i in net.modules():
        if isinstance(i, nn.Conv2d) or isinstance(i, nn.Linear):
            mask_compare.append(torch.clone(mb.masks_compare[i]).detach())
            prune_mask_compare.append(torch.clone(-1 * (mb.masks_compare[i] - 1)).detach())


    # print("prune")
    # print("len", len(prune_mask_compare))
    # print("len", len(prune_mask))
    # 1 where the two prunings disagree (currently unused below)
    prune_mask_sub = []
    for i in range(len(prune_mask_compare)):
        prune_mask_sub.append(torch.where(prune_mask_compare[i] == prune_mask[i], 0, 1))

    weights = []  # NOTE(review): collected but unused in this loop variant
    for layer in net.modules():
        if isinstance(layer, nn.Conv2d) or isinstance(layer, nn.Linear):
            weights.append(layer.weight)

    # Warm-up phase: plain cross-entropy at lr=0.0002, masks re-applied per step.
    for epoch in range(5):
        net.train()
        train_loss = 0
        correct = 0
        total = 0
        lr = lr_scheduler.get_last_lr()
        for batch_idx, (inputs, targets) in enumerate(trainloader):
            inputs, targets = inputs.cuda(), targets.cuda()
            optimizer.zero_grad()
            outputs = net(inputs)
            loss = criterion(outputs, targets)
            loss.backward();

            optimizer.step()
            mb.apply_masks()
        lr_scheduler.step()

        loss,acc=test(net, testloader, criterion, epoch)
    print("--------------------------finish pre-training-------------------------------------")
    # Main phase: standard training with a fresh optimizer/schedule.
    optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9, weight_decay=weight_decay)
    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=num_epochs)
    for epoch in range(num_epochs):
        record_noise_mean_for_epoch(mb,trainloader,epoch,config)
        net.train()
        train_loss = 0
        correct = 0
        total = 0
        lr = lr_scheduler.get_last_lr()
        for batch_idx, (inputs, targets) in enumerate(trainloader):
            inputs, targets = inputs.cuda(), targets.cuda()
            optimizer.zero_grad()
            outputs = net(inputs)
            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()
            train_loss += loss.item()*targets.size(0)
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
            _lr = lr_scheduler.get_last_lr()
            if(batch_idx%100==0):
                print('train[epoch:{}]-----loss:{:.8f},acc:{:.2f}%,lr:{}'.format(epoch,train_loss/total,correct/total*100,_lr))
            mb.apply_masks()
        norm=0
        # L1 norm of all conv/linear weights, tracked per epoch
        for i in net.modules():
            if isinstance(i, nn.Conv2d) or isinstance(i, nn.Linear):
                norm+=(torch.norm(i.weight.data,1).sum().item())
        weight_norm.append(norm)
        lr_scheduler.step()
        loss,acc=test(net, testloader, criterion, epoch)
        ori_loss.append(loss)
        ori_acc.append(acc)

    np.save('ori_loss.npy', ori_loss)
    np.save('ori_acc.npy', ori_acc)