import torch
from torch._C import import_ir_module
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn

import torchvision
import torchvision.transforms as transforms

import os
import argparse
import random
import numpy as np 
from easypruner import fastpruner 

from pytorch_cifar.models import *

def seed_torch(seed=2222):
    """Seed every RNG source (Python, NumPy, PyTorch CPU and CUDA) for reproducibility.

    Also pins cuDNN to deterministic kernels and disables its benchmark
    autotuner, trading some speed for repeatable results.
    """
    seed = int(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    for seeder in (random.seed,
                   np.random.seed,
                   torch.manual_seed,
                   torch.cuda.manual_seed,
                   torch.cuda.manual_seed_all):
        seeder(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

# Command-line interface for the pruning script.
parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
parser.add_argument('--model', type=str, help='model name')
parser.add_argument('--weight', type=str, help='path to the trained checkpoint (.pth) to prune')
# NOTE: the original help strings for --mode and --rate were copy-paste
# leftovers ('model name' / 'learning rate'); they now describe the options.
parser.add_argument('--mode', type=str, help="pruning mode: 'u' (Uniform) or 'r' (Ratio)")
parser.add_argument('--rate', default=0.5, type=float, help='prune factor (fraction of channels to remove)')
args = parser.parse_args()

# Seed all RNGs before any model construction / weight loading so the run is reproducible.
seed_torch()

# Run on GPU when available, otherwise fall back to CPU.
device = 'cuda' if torch.cuda.is_available() else 'cpu'


# Model
print('==> Building model..')

# Map each CLI model name to its constructor (classes come from
# pytorch_cifar.models, star-imported above).
_MODEL_FACTORIES = {
    'vgg19': lambda: VGG('VGG19'),
    'vgg16': lambda: VGG('VGG16'),
    'resnet18': ResNet18,
    'resnet50': ResNet50,
    'resnet101': ResNet101,
    'preactresnet18': PreActResNet18,
    'googlenet': GoogLeNet,
    'densenet121': DenseNet121,
    'mobilenet': MobileNet,
    'mobilenetv2': MobileNetV2,
    'regnetx_200mf': RegNetX_200MF,
}

try:
    net = _MODEL_FACTORIES[args.model]()
except KeyError:
    # Previously an unknown --model left `net` undefined and crashed later
    # with a confusing NameError; fail fast with a clear message instead.
    raise ValueError('Unknown model name: {!r}; choose one of {}'.format(
        args.model, sorted(_MODEL_FACTORIES))) from None

net = net.to(device)

# Load the training checkpoint; expected layout: {'net': state_dict, ...}.
checkpoint = torch.load(args.weight)
state_dict = checkpoint['net']

# Checkpoints saved from a torch.nn.DataParallel wrapper prefix every key
# with 'module.'; strip that prefix so keys match the bare model.
# (Only the first key is inspected — assumes all keys share the prefix.)
if next(iter(state_dict)).split('.')[0] == 'module':
    state_dict = {k[len('module.'):]: v for k, v in state_dict.items()}

net.load_state_dict(state_dict)
model = net

# Pruning below is done on CPU.
model.cpu()

# Both "Uniform" and "Ratio" methods are worth trying; note the method
# string is case-sensitive for easypruner.
if args.mode == 'u':
    # Uniform: remove the same fraction of channels from every layer.
    fastpruner.fastpruner(model, prune_factor=args.rate, method="Uniform", input_dim=[3, 32, 32])
elif args.mode == 'r':
    # Ratio: let the pruner pick per-layer ratios.
    fastpruner.fastpruner(model, prune_factor=args.rate, method="Ratio", input_dim=[3, 32, 32])
else:
    # Previously an unknown --mode silently skipped pruning and the script
    # saved unpruned weights; fail loudly instead.
    raise ValueError("Unknown --mode {!r}: expected 'u' (Uniform) or 'r' (Ratio)".format(args.mode))
model.to(device)



#save_path = './pytorch_cifar/work_dirs/{}_1/{}_pruned_{}.pt'.format(args.model, args.model, args.mode) #可选 
#save_path_all = './pytorch_cifar/work_dirs/{}_1/{}_pruned_{}_{}.pth'.format(args.model, args.model, args.mode, args.rate) #可选 
#torch.save(model.state_dict(), save_path) #可选 
#torch.save(model, save_path_all)

save_path = '/'.join(  args.weight.split('/')[:-1]  )+'/pruned'
import os
if not os.path.exists(save_path):
    os.makedirs(save_path)

save_path = save_path+'/'+args.model+'_'+args.mode+'_'+str(args.rate)+'.pth'

torch.save(model.state_dict(), save_path)





print(save_path)
print('Finish prune {}'.format(args.model))
