mahmoud669 committed
Commit 60ebf8f • 1 Parent(s): e40a05a
Update scrub.py
scrub.py CHANGED
@@ -344,12 +344,13 @@ def unlearn():
     model.load_state_dict(torch.load('faces_best_model.pth', map_location=torch.device('cpu')))
     model_eval = copy.deepcopy(model)
     model_eval.eval()
+    print("BEGIN INTIALZINGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGG")
     forget_class = get_forget_class('forget_set', model_eval)
     mean, std, im_size = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225], 224
     tfs = transforms.Compose([transforms.Resize((im_size, im_size)), transforms.ToTensor(), transforms.Normalize(mean = mean, std = std)])
     will_tr_dl, will_val_dl, will_ts_dl, classes = get_dls(root = "forget_set", forget_class=forget_class, transformations = tfs, bs = 32, single=True)
     celebs_tr_dl, celebs_val_dl, celebs_ts_dl, classes = get_dls(root = "celeb-dataset", forget_class=forget_class, transformations = tfs, bs = 32)
-
+    print("BEGIN PEeEEEEEEEEEEEEEEEEEEEEEEERPARING FOR UNLEARNINGGGGGGGG")
     args = Args()
     args.optim = 'sgd'
     args.gamma = 0.99
@@ -395,8 +396,9 @@ def unlearn():
                                           weight_decay=args.sgda_weight_decay)
 
     module_list.append(model_t)
-
+    print("BEGIN UNLEARNINGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGG")
     for epoch in tqdm(range(1, args.sgda_epochs + 1)):
+        print("\n\n==============================>epoch: ", epoch)
         maximize_loss = 0
         if epoch <= args.msteps:
             maximize_loss = train_distill(epoch, will_tr_dl, module_list, swa_model, criterion_list, optimizer, args, "maximize")
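A minimal runnable sketch of the loop the new prints land in, assuming only what the second hunk shows: the maximize (forgetting) phase runs while epoch <= args.msteps. train_distill_stub and the Args defaults are hypothetical stand-ins for the repo's train_distill and Args; only the loop shape and print placement mirror the diff.

from dataclasses import dataclass
from tqdm import tqdm

@dataclass
class Args:
    sgda_epochs: int = 10   # hypothetical default; the real value is set elsewhere in scrub.py
    msteps: int = 3         # epochs that include the maximize phase

def train_distill_stub(epoch, loader, mode):
    # hypothetical stand-in for train_distill(epoch, loader, module_list,
    # swa_model, criterion_list, optimizer, args, mode); returns the phase loss
    return 0.0

args = Args()
for epoch in tqdm(range(1, args.sgda_epochs + 1)):
    print("\n\n==============================>epoch: ", epoch)
    maximize_loss = 0
    if epoch <= args.msteps:
        maximize_loss = train_distill_stub(epoch, None, "maximize")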