Commit 6def2be
Parent(s): 81d0ed5
Update app.py

app.py CHANGED
@@ -42,21 +42,21 @@ pipe.unet.load_state_dict(load_file(hf_hub_download(repo, ckpt), device=device),
 step_loaded = step
 
 # Note Julian: I'm not sure this works well when the pipeline changes dynamically.. to check
-helper = DeepCacheSDHelper(pipe=pipe)
-helper.set_params(
-    # cache_interval means the frequency of feature caching, specified as the number of steps between each cache operation.
-    # with AnimateDiff this seems to have large effects, so we cannot use large values,
-    # even with cache_interval=3 I notice a big degradation in quality
-    cache_interval=2,
-
-    # cache_branch_id identifies which branch of the network (ordered from the shallowest to the deepest layer) is responsible for executing the caching processes.
-    # Note Julian: I should create my own benchmarks for this
-    cache_branch_id=0,
-
-    # Opting for a lower cache_branch_id or a larger cache_interval can lead to faster inference speed at the expense of reduced image quality
-    #(ablation experiments of these two hyperparameters can be found in the paper).
-)
-helper.enable()
+#helper = DeepCacheSDHelper(pipe=pipe)
+#helper.set_params(
+#    # cache_interval means the frequency of feature caching, specified as the number of steps between each cache operation.
+#    # with AnimateDiff this seems to have large effects, so we cannot use large values,
+#    # even with cache_interval=3 I notice a big degradation in quality
+#    cache_interval=2,
+#
+#    # cache_branch_id identifies which branch of the network (ordered from the shallowest to the deepest layer) is responsible for executing the caching processes.
+#    # Note Julian: I should create my own benchmarks for this
+#    cache_branch_id=0,
+#
+#    # Opting for a lower cache_branch_id or a larger cache_interval can lead to faster inference speed at the expense of reduced image quality
+#    #(ablation experiments of these two hyperparameters can be found in the paper).
+#)
+#helper.enable()
 
 # ----------------------------------- VIDEO ENCODING ---------------------------------
 # The Diffusers utils hardcode MP4V as a codec which is not supported by all browsers.
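
For context (not part of the commit): the hunk above disables DeepCache entirely by commenting the block out. Below is a minimal sketch of how DeepCacheSDHelper is typically toggled around a single generation call, reusing the cache_interval and cache_branch_id values from the now-commented block. The names `pipe`, `prompt` and `step` are assumed to exist in the surrounding app.py; `helper.disable()` is DeepCache's documented counterpart to `enable()`.

```python
# Sketch only, under the assumptions named above; not the code in this commit.
from DeepCache import DeepCacheSDHelper

helper = DeepCacheSDHelper(pipe=pipe)
helper.set_params(
    cache_interval=2,   # steps between cache refreshes; larger values are faster but degrade quality
    cache_branch_id=0,  # which U-Net branch (shallowest to deepest) drives the caching
)

helper.enable()                                    # patch the pipeline so it reuses cached features
output = pipe(prompt, num_inference_steps=step)    # generation runs with DeepCache active
helper.disable()                                   # restore the unmodified forward pass afterwards
```

Wrapping the call this way keeps DeepCache scoped to one generation, which may be easier to reason about when, as the note in the diff says, the pipeline changes dynamically between requests.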