LeoXing1996 committed
Commit: f302f51
Parent: f0bcc62

add seed everything

Files changed (1): app-huggingface.py (+20, -9)
app-huggingface.py CHANGED
@@ -53,6 +53,16 @@ VAE_PATH = './models/VAE'
 DreamBooth_LoRA_PATH = './models/DreamBooth_LoRA'
 
 
+def seed_everything(seed):
+    import random
+
+    import numpy as np
+    torch.manual_seed(seed)
+    torch.cuda.manual_seed_all(seed)
+    np.random.seed(seed % (2**32))
+    random.seed(seed)
+
+
 if not LOCAL_DEBUG:
     CACHE_PATH = './models'
 
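The new helper follows the common "seed everything" pattern: one value drives Python's random module, NumPy, and torch's CPU and CUDA generators. NumPy's legacy np.random.seed only accepts values in [0, 2**32), while torch seeds can be 64-bit, which is why the helper reduces the seed with seed % (2**32). A minimal self-contained sketch of the same pattern (the demo values are illustrative, not from the commit):

import random

import numpy as np
import torch


def seed_everything(seed):
    # Seed torch's CPU generator and all visible CUDA devices.
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Fold possibly 64-bit torch seeds into NumPy's accepted range.
    np.random.seed(seed % (2**32))
    random.seed(seed)


if __name__ == '__main__':
    seed_everything(42)
    first = (random.random(), np.random.rand(), torch.rand(1).item())
    seed_everything(42)
    second = (random.random(), np.random.rand(), torch.rand(1).item())
    assert first == second  # re-seeding reproduces the same draws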
@@ -235,12 +245,12 @@ class AnimateController:
         gr.Info('Use exmaple image for fast try!')
         init_img = np.array(Image.open(example_img))
 
-        init_img_bk = copy.deepcopy(init_img)
         if seed_textbox != -1 and seed_textbox != "":
             torch.manual_seed(int(seed_textbox))
         else:
-            torch.seed()
-        seed = torch.initial_seed()
+            seed = torch.initial_seed()
+
+        seed_everything(seed)
 
         pipeline = self.pipeline_dict[style]
         init_img, h, w = preprocess_img(init_img)
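When no seed is typed into the textbox, the branch now reads back the default torch generator's seed via torch.initial_seed() and fans it out to every RNG through the new helper. A quick sketch of the torch behavior this relies on (values illustrative):

import torch

# initial_seed() reports the seed of torch's default CPU generator,
# so a seed set with manual_seed() can be read back afterwards.
torch.manual_seed(1234)
assert torch.initial_seed() == 1234

# Without an explicit manual_seed(), initial_seed() returns the
# non-deterministic seed the generator received at startup.
seed = torch.initial_seed()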
@@ -278,7 +288,8 @@ class AnimateController:
             f.write(json_str)
             f.write("\n\n")
 
-        return init_img_bk, save_sample_path
+        # return init_img_bk, save_sample_path
+        return save_sample_path
 
 
 controller = AnimateController()
@@ -407,7 +418,7 @@ def ui():
                 ip_adapter_scale,
                 style_dropdown,
             ],
-            outputs=[init_img, result_video]
+            outputs=[result_video]
         )
         default_1.click(
             fn=controller.animate,
@@ -423,7 +434,7 @@ def ui():
                 default_style,
                 example_img,
             ],
-            outputs=[init_img, result_video])
+            outputs=[result_video])
         default_2.click(
             fn=controller.animate,
             inputs=[
@@ -438,7 +449,7 @@ def ui():
                 default_style,
                 example_img,
             ],
-            outputs=[init_img, result_video])
+            outputs=[result_video])
         default_3.click(
             fn=controller.animate,
             inputs=[
@@ -453,7 +464,7 @@ def ui():
                 default_style,
                 example_img,
             ],
-            outputs=[init_img, result_video])
+            outputs=[result_video])
         default_4.click(
             fn=controller.animate,
             inputs=[
@@ -468,7 +479,7 @@ def ui():
                 default_style,
                 example_img,
             ],
-            outputs=[init_img, result_video])
+            outputs=[result_video])
 
     def create_example(input_list):
         return gr.Examples(
 
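Since animate() now returns only the saved sample path, each .click() wiring drops init_img from its outputs list: in Gradio, the number of output components must match the number of values the callback returns. A minimal sketch of the wiring the commit moves to (the stub and component names are illustrative, not from the app):

import gradio as gr


def animate_stub():
    # Stand-in for controller.animate, which now returns a single path.
    return 'outputs/sample.mp4'


with gr.Blocks() as demo:
    result_video = gr.Video()
    default_1 = gr.Button('Animate')
    # One return value -> exactly one output component; keeping init_img
    # in this list would make Gradio expect a second return value.
    default_1.click(fn=animate_stub, outputs=[result_video])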