user committed on
Commit
22d7bd3
1 Parent(s): a468bb5
This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .gitignore +1 -1
  2. extensions/deforum/.github/FUNDING.yml +13 -0
  3. extensions/deforum/.github/ISSUE_TEMPLATE/bug_report.yml +97 -0
  4. extensions/deforum/.github/ISSUE_TEMPLATE/config.yml +8 -0
  5. extensions/deforum/.github/ISSUE_TEMPLATE/feature_request.yml +40 -0
  6. extensions/deforum/.gitignore +10 -0
  7. extensions/deforum/CONTRIBUTING.md +7 -0
  8. extensions/deforum/LICENSE +0 -0
  9. extensions/deforum/README.md +81 -0
  10. extensions/deforum/install.py +14 -0
  11. extensions/deforum/javascript/deforum-hints.js +191 -0
  12. extensions/deforum/javascript/deforum.js +21 -0
  13. extensions/deforum/requirements.txt +7 -0
  14. extensions/deforum/scripts/deforum.py +318 -0
  15. extensions/deforum/scripts/deforum_helpers/__init__.py +7 -0
  16. extensions/deforum/scripts/deforum_helpers/animation.py +258 -0
  17. extensions/deforum/scripts/deforum_helpers/animation_key_frames.py +106 -0
  18. extensions/deforum/scripts/deforum_helpers/args.py +1214 -0
  19. extensions/deforum/scripts/deforum_helpers/blank_frame_reroll.py +24 -0
  20. extensions/deforum/scripts/deforum_helpers/colors.py +16 -0
  21. extensions/deforum/scripts/deforum_helpers/composable_masks.py +198 -0
  22. extensions/deforum/scripts/deforum_helpers/deforum_controlnet.py +462 -0
  23. extensions/deforum/scripts/deforum_helpers/deforum_controlnet_hardcode.py +193 -0
  24. extensions/deforum/scripts/deforum_helpers/deprecation_utils.py +20 -0
  25. extensions/deforum/scripts/deforum_helpers/depth.py +166 -0
  26. extensions/deforum/scripts/deforum_helpers/frame_interpolation.py +192 -0
  27. extensions/deforum/scripts/deforum_helpers/general_utils.py +32 -0
  28. extensions/deforum/scripts/deforum_helpers/generate.py +244 -0
  29. extensions/deforum/scripts/deforum_helpers/gradio_funcs.py +83 -0
  30. extensions/deforum/scripts/deforum_helpers/human_masking.py +72 -0
  31. extensions/deforum/scripts/deforum_helpers/hybrid_video.py +436 -0
  32. extensions/deforum/scripts/deforum_helpers/image_sharpening.py +22 -0
  33. extensions/deforum/scripts/deforum_helpers/load_images.py +102 -0
  34. extensions/deforum/scripts/deforum_helpers/noise.py +64 -0
  35. extensions/deforum/scripts/deforum_helpers/parseq_adapter.py +164 -0
  36. extensions/deforum/scripts/deforum_helpers/prompt.py +113 -0
  37. extensions/deforum/scripts/deforum_helpers/render.py +507 -0
  38. extensions/deforum/scripts/deforum_helpers/render_modes.py +154 -0
  39. extensions/deforum/scripts/deforum_helpers/rich.py +2 -0
  40. extensions/deforum/scripts/deforum_helpers/save_images.py +80 -0
  41. extensions/deforum/scripts/deforum_helpers/seed.py +26 -0
  42. extensions/deforum/scripts/deforum_helpers/settings.py +272 -0
  43. extensions/deforum/scripts/deforum_helpers/src/adabins/__init__.py +1 -0
  44. extensions/deforum/scripts/deforum_helpers/src/adabins/layers.py +36 -0
  45. extensions/deforum/scripts/deforum_helpers/src/adabins/miniViT.py +45 -0
  46. extensions/deforum/scripts/deforum_helpers/src/adabins/unet_adaptive_bins.py +154 -0
  47. extensions/deforum/scripts/deforum_helpers/src/clipseg/LICENSE +21 -0
  48. extensions/deforum/scripts/deforum_helpers/src/clipseg/Quickstart.ipynb +107 -0
  49. extensions/deforum/scripts/deforum_helpers/src/clipseg/Readme.md +84 -0
  50. extensions/deforum/scripts/deforum_helpers/src/clipseg/Tables.ipynb +349 -0
.gitignore CHANGED
@@ -29,7 +29,7 @@ notification.mp3
  /SwinIR
  /textual_inversion
  .vscode
- /extensions
+ # /extensions
  /test/stdout.txt
  /test/stderr.txt
  /cache.json
extensions/deforum/.github/FUNDING.yml ADDED
@@ -0,0 +1,13 @@
+ # These are supported funding model platforms
+
+ github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
+ patreon: deforum
+ open_collective: # Replace with a single Open Collective username
+ ko_fi: # Replace with a single Ko-fi username
+ tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
+ community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
+ liberapay: # Replace with a single Liberapay username
+ issuehunt: # Replace with a single IssueHunt username
+ otechie: # Replace with a single Otechie username
+ lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
+ custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
extensions/deforum/.github/ISSUE_TEMPLATE/bug_report.yml ADDED
@@ -0,0 +1,97 @@
1
+ name: Bug Report
2
+ description: Create a bug report for the Deforum extension
3
+ title: "[Bug]: "
4
+ labels: ["bug-report"]
5
+
6
+ body:
7
+ - type: checkboxes
8
+ attributes:
9
+ label: Have you read the latest version of the FAQ?
10
+ description: Please visit the page called FAQ & Troubleshooting on the Deforum wiki in this repository and see if your problem has already been described there.
11
+ options:
12
+ - label: I have visited the FAQ page right now and my issue is not present there
13
+ required: true
14
+ - type: checkboxes
15
+ attributes:
16
+ label: Is there an existing issue for this?
17
+ description: Please search to see if an issue already exists for the bug you encountered (including the closed issues).
18
+ options:
19
+ - label: I have searched the existing issues and checked the recent builds/commits of both this extension and the webui
20
+ required: true
21
+ - type: checkboxes
22
+ attributes:
23
+ label: Are you using the latest version of the Deforum extension?
24
+ description: Please check if your Deforum is based on the latest repo commit (git log) or update it through the 'Extensions' tab and check if the issue still persists. If it does, check this box.
25
+ options:
26
+ - label: I have Deforum updated to the latest version and I still have the issue.
27
+ required: true
28
+ - type: markdown
29
+ attributes:
30
+ value: |
31
+ **Please fill this form with as much information as possible; don't forget to fill in "What OS..." and "What browsers", and provide screenshots if possible**
32
+ - type: textarea
33
+ id: what-did
34
+ attributes:
35
+ label: What happened?
36
+ description: Tell us what happened in a very clear and simple way
37
+ validations:
38
+ required: true
39
+ - type: textarea
40
+ id: steps
41
+ attributes:
42
+ label: Steps to reproduce the problem
43
+ description: Please provide us with precise step by step information on how to reproduce the bug
44
+ value: |
45
+ 1. Go to ....
46
+ 2. Press ....
47
+ 3. ...
48
+ validations:
49
+ required: true
50
+ - type: textarea
51
+ id: what-should
52
+ attributes:
53
+ label: What should have happened?
54
+ description: Tell what you think the normal behavior should be
55
+ - type: textarea
56
+ id: commits
57
+ attributes:
58
+ label: WebUI and Deforum extension Commit IDs
59
+ description: Which commit of the webui/deforum extension are you running on? (Do not write *Latest version/repo/commit*, as this means nothing and will have changed by the time we read your issue. Rather, copy the **Commit** link at the bottom of the UI, or if you can't launch the webui at all, enter your cmd/terminal, CD into the main webui folder to get the webui commit id, and cd into the extensions/deforum folder to get the deforum commit id, both using the command 'git rev-parse HEAD'.)
60
+ value: |
61
+ webui commit id -
62
+ deforum exten commit id -
63
+ validations:
64
+ required: true
65
+ - type: dropdown
66
+ id: where
67
+ attributes:
68
+ label: On which platform are you launching the webui with the extension?
69
+ multiple: true
70
+ options:
71
+ - Local PC setup (Windows)
72
+ - Local PC setup (Linux)
73
+ - Local PC setup (Mac)
74
+ - Google Colab (The Last Ben's)
75
+ - Google Colab (Other)
76
+ - Cloud server (Linux)
77
+ - Other (please specify in "additional information")
78
+ - type: textarea
79
+ id: customsettings
80
+ attributes:
81
+ label: Webui core settings
82
+ description: Send here a link to your ui-config.json file in the core 'stable-diffusion-webui' folder (ideally, upload it to GitHub gists). Friendly reminder - if you have 'With img2img, do exactly the amount of steps the slider specified' checked, your issue will be discarded immediately. 😉
83
+ validations:
84
+ required: true
85
+ - type: textarea
86
+ id: logs
87
+ attributes:
88
+ label: Console logs
89
+ description: Please provide **full** cmd/terminal logs from the moment you started UI to the end of it, after your bug happened. If it's very long, provide a link to GitHub gists or similar service.
90
+ render: Shell
91
+ validations:
92
+ required: true
93
+ - type: textarea
94
+ id: misc
95
+ attributes:
96
+ label: Additional information
97
+ description: Please provide us with any relevant additional info or context.
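Note: the commit-ID field above asks reporters to run `git rev-parse HEAD` in both the webui root and the extension folder. As a hedged illustration only (the folder paths below are assumptions, not part of this commit), a small Python helper that gathers both IDs could look like this:

```python
# Hypothetical helper, not part of this commit: print the commit IDs the
# bug-report template asks for. Paths assume default folder names.
import subprocess

def commit_id(repo_path: str) -> str:
    # 'git rev-parse HEAD' prints the commit hash the working tree is on.
    result = subprocess.run(
        ["git", "rev-parse", "HEAD"],
        cwd=repo_path, capture_output=True, text=True, check=True,
    )
    return result.stdout.strip()

if __name__ == "__main__":
    print("webui commit id -", commit_id("stable-diffusion-webui"))
    print("deforum exten commit id -", commit_id("stable-diffusion-webui/extensions/deforum"))
```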
extensions/deforum/.github/ISSUE_TEMPLATE/config.yml ADDED
@@ -0,0 +1,8 @@
+ blank_issues_enabled: false
+ contact_links:
+   - name: Deforum Github discussions
+     url: https://github.com/deforum-art/deforum-for-automatic1111-webui/discussions
+     about: Please ask and answer questions here.
+   - name: Deforum Discord
+     url: https://discord.gg/deforum
+     about: Or here :)
extensions/deforum/.github/ISSUE_TEMPLATE/feature_request.yml ADDED
@@ -0,0 +1,40 @@
+ name: Feature request
+ description: Suggest an idea for the Deforum extension
+ title: "[Feature Request]: "
+ labels: ["enhancement"]
+
+ body:
+   - type: checkboxes
+     attributes:
+       label: Is there an existing issue for this?
+       description: Please search to see if an issue already exists for the feature you want, and that it's not implemented in a recent build/commit.
+       options:
+         - label: I have searched the existing issues and checked the recent builds/commits
+           required: true
+   - type: markdown
+     attributes:
+       value: |
+         *Please fill this form with as much information as possible, provide screenshots and/or illustrations of the feature if possible*
+   - type: textarea
+     id: feature
+     attributes:
+       label: What would your feature do ?
+       description: Tell us about your feature in a very clear and simple way, and what problem it would solve
+     validations:
+       required: true
+   - type: textarea
+     id: workflow
+     attributes:
+       label: Proposed workflow
+       description: Please provide us with step by step information on how you'd like the feature to be accessed and used
+       value: |
+         1. Go to ....
+         2. Press ....
+         3. ...
+     validations:
+       required: true
+   - type: textarea
+     id: misc
+     attributes:
+       label: Additional information
+       description: Add any other context or screenshots about the feature request here.
extensions/deforum/.gitignore ADDED
@@ -0,0 +1,10 @@
+ # Unnecessary compiled python files.
+ __pycache__
+ *.pyc
+ *.pyo
+
+ # Output Images
+ outputs
+
+ # Log files for colab-convert
+ cc-outputs.log
extensions/deforum/CONTRIBUTING.md ADDED
@@ -0,0 +1,7 @@
+ # Contributing
+
+ As a part of the Deforum team, I (kabachuha) want this script extension to remain a part of the Deforum project.
+
+ Thus, if you want to submit a feature request or a bugfix, unless it relates only to automatic1111's porting issues, consider making a PR first to the parent repository notebook https://github.com/deforum/stable-diffusion.
+
+ Also, you may want to inform the dev team about your work via Discord https://discord.gg/deforum to ensure that no one else is working on the same thing.
extensions/deforum/LICENSE ADDED
The diff for this file is too large to render.
 
extensions/deforum/README.md ADDED
@@ -0,0 +1,81 @@
1
+
2
+ # Deforum Stable Diffusion — official extension for AUTOMATIC1111's webui
3
+
4
+ <p align="left">
5
+ <a href="https://github.com/deforum-art/deforum-for-automatic1111-webui/commits"><img alt="Last Commit" src="https://img.shields.io/github/last-commit/deforum-art/deforum-for-automatic1111-webui"></a>
6
+ <a href="https://github.com/deforum-art/deforum-for-automatic1111-webui/issues"><img alt="GitHub issues" src="https://img.shields.io/github/issues/deforum-art/deforum-for-automatic1111-webui"></a>
7
+ <a href="https://github.com/deforum-art/deforum-for-automatic1111-webui/stargazers"><img alt="GitHub stars" src="https://img.shields.io/github/stars/deforum-art/deforum-for-automatic1111-webui"></a>
8
+ <a href="https://github.com/deforum-art/deforum-for-automatic1111-webui/network"><img alt="GitHub forks" src="https://img.shields.io/github/forks/deforum-art/deforum-for-automatic1111-webui"></a>
9
+ </a>
10
+ </p>
11
+
12
+ ## Before Starting
13
+
14
+ **Important note about versions updating:** <br>
15
+ As auto's webui gets updated multiple times a day, every day, things tend to break with regard to extension compatibility.
16
+ Therefore, it is recommended to keep two folders:
17
+ 1. "Stable" folder that you don't regularly update, with versions that you know *work* together (we will provide info on this soon).
18
+ 2. "Experimental" folder in which you can add 'git pull' to your webui-user.bat, update deforum every day, etc. Keep it wild - but be prepared for bugs.
19
+
20
+
21
+ ## Getting Started
22
+
23
+ 1. Install [AUTOMATIC1111's webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui/). <br>If the repo link doesn't work, please use the alternate official download source: [https://gitgud.io/AUTOMATIC1111/stable-diffusion-webui](https://gitgud.io/AUTOMATIC1111/stable-diffusion-webui). To change your existing webui's installation origin, execute `git remote set-url origin https://gitgud.io/AUTOMATIC1111/stable-diffusion-webui` in the webui starting folder.
24
+
25
+ 2. There are two ways to install it: either clone the repo into the `extensions` directory via the git command line, launched from within the `stable-diffusion-webui` folder
26
+
27
+ ```sh
28
+ git clone https://github.com/deforum-art/deforum-for-automatic1111-webui extensions/deforum
29
+ ```
30
+
31
+ Or download this repository, locate the `extensions` folder within your WebUI installation, create a folder named `deforum` and put the contents of the downloaded directory inside of it. Then restart WebUI. **Warning: the extension folder has to be named 'deforum' or 'deforum-for-automatic1111-webui', otherwise it will fail to locate the 3D modules as the PATH addition is hardcoded**
32
+
33
+ 3. Open the webui, find the Deforum tab at the top of the page.
34
+
35
+ 4. Enter the animation settings. Refer to [this general guide](https://docs.google.com/document/d/1pEobUknMFMkn8F5TMsv8qRzamXX_75BShMMXV8IFslI/edit) and [this guide to math keyframing functions in Deforum](https://docs.google.com/document/d/1pfW1PwbDIuW0cv-dnuyYj1UzPqe23BlSLTJsqazffXM/edit?usp=sharing). However, **in this version prompt weights less than zero don't just work like in the original Deforum!** Split the positive and the negative prompt in the JSON section using the --neg argument, like this: "apple:\`where(cos(t)>=0, cos(t), 0)\`, snow --neg strawberry:\`where(cos(t)<0, -cos(t), 0)\`" (a minimal prompts sketch is shown after this file's diff).
36
+
37
+ 5. To view animation frames as they're being made, without waiting for the completion of an animation, go to the 'Settings' tab and set the value of this toolbar **above zero**. Warning: it may slow down the generation process. If you have the 'Do exactly the amount of steps the slider specifies' checkbox selected in the tab, unselect it, as it won't allow you to use Deforum schedules and you will get abrupt frame changes without transitions. Then click 'Apply settings' at the top of the page. Now return to the 'Deforum' tab.
38
+
39
+ ![adsdasunknown](https://user-images.githubusercontent.com/14872007/196064311-1b79866a-e55b-438a-84a7-004ff30829ad.png)
40
+
41
+
42
+ 6. Run the script and see if you got it working or even got something. **In 3D mode a large delay is expected at first** as the script loads the depth models. In the end, using the default settings the whole thing should consume 6.4 GBs of VRAM at 3D mode peaks and no more than 3.8 GB VRAM in 3D mode if you launch the webui with the '--lowvram' command line argument.
43
+
44
+ 7. After the generation process is completed, click the button with the self-describing name to show the video or gif result right in the GUI!
45
+
46
+ 8. Join our Discord where you can post generated stuff, ask questions and more: https://discord.gg/deforum. <br>
47
+ * There's also the 'Issues' tab in the repo, for well... reporting issues ;)
48
+
49
+ 9. Profit!
50
+
51
+ ## Known issues
52
+
53
+ * This port is not fully backward-compatible with the notebook and the local version, both because of the changes in how AUTOMATIC1111's webui handles Stable Diffusion models and the changes made in this script to get it to work in the new environment. *Expect* that you may not get exactly the same result, or that things may break because of older settings.
54
+
55
+ ## Screenshots
56
+
57
+ https://user-images.githubusercontent.com/121192995/215522284-d6fbedd5-09e2-4d2c-bd10-f9bbb4a20f82.mp4
58
+
59
+ Main extension tab:
60
+
61
+ ![maintab](https://user-images.githubusercontent.com/121192995/215362176-4e5599c1-9cb6-4bf9-964d-0ff882661993.png)
62
+
63
+ Keyframes tab:
64
+
65
+ ![keyframes](https://user-images.githubusercontent.com/121192995/215362228-c239c43a-d565-4862-b490-d18b19eaaaa5.png)
66
+
67
+ Math evaluation:
68
+
69
+ ![math-eval](https://user-images.githubusercontent.com/121192995/215362467-481127a4-247a-4b0d-924a-d10719aa4c01.png)
70
+
71
+
72
+ ## Benchmarks
73
+
74
+ 3D mode without additional WebUI flags
75
+
76
+ ![image](https://user-images.githubusercontent.com/14872007/196294447-7817f138-ec4b-4001-885f-454f8667100d.png)
77
+
78
+ 3D mode when WebUI is launched with '--lowvram'
79
+
80
+ ![image](https://user-images.githubusercontent.com/14872007/196294517-125fbb27-c06d-4c4b-bcbc-7c743103eff6.png)
81
+
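As referenced in step 4 of the README above, here is a minimal sketch of a prompt schedule using the --neg split and a math expression. The keyframe numbers and the second prompt are illustrative only, not taken from this commit; the UI takes the same structure entered as JSON:

```python
# Illustrative prompt schedule only; keys are frame numbers as strings.
animation_prompts = {
    "0": "apple:`where(cos(t)>=0, cos(t), 0)`, snow --neg strawberry:`where(cos(t)<0, -cos(t), 0)`",
    "60": "a watercolor forest, morning light --neg blurry, low quality",
}
# Entered in the Deforum 'Prompts' box as the equivalent JSON object.
```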
extensions/deforum/install.py ADDED
@@ -0,0 +1,14 @@
+ import launch
+ import os
+ import sys
+
+ req_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), "requirements.txt")
+
+ with open(req_file) as file:
+     for lib in file:
+         lib = lib.strip()
+         if not launch.is_installed(lib):
+             if lib == 'rich':
+                 launch.run(f'"{sys.executable}" -m pip install {lib}', desc=f"Installing Deforum requirement: {lib}", errdesc=f"Couldn't install {lib}")
+             else:
+                 launch.run_pip(f"install {lib}", f"Deforum requirement: {lib}")
extensions/deforum/javascript/deforum-hints.js ADDED
@@ -0,0 +1,191 @@
1
+ // mouseover tooltips for various UI elements
2
+
3
+ deforum_titles = {
4
+ //Run
5
+ "Override settings": "specify a custom settings file and ignore settings displayed in the interface",
6
+ "Custom settings file": "the path to a custom settings file",
7
+ "Width": "The width of the output images, in pixels (must be a multiple of 64)",
8
+ "Height": "The height of the output images, in pixels (must be a multiple of 64)",
9
+ "Restore faces": "Restore low quality faces using GFPGAN neural network",
10
+ "Tiling": "Produce an image that can be tiled.",
11
+ "Highres. fix": "Use a two step process to partially create an image at smaller resolution, upscale, and then improve details in it without changing composition",
12
+ "Seed": "A value that determines the output of random number generator - if you create an image with same parameters and seed as another image, you'll get the same result",
13
+ "Sampler": "Which algorithm to use to produce the image",
14
+ "Enable extras": "enable additional seed settings",
15
+ "Subseed": "Seed of a different picture to be mixed into the generation.",
16
+ "Subseed strength": "How strong of a variation to produce. At 0, there will be no effect. At 1, you will get the complete picture with variation seed (except for ancestral samplers, where you will just get something).",
17
+ "Resize seed from width": "Normally, changing the resolution will completely change an image, even when using the same seed. If you generated an image with a particular seed and then changed the resolution, put the original resolution here to get an image that more closely resemles the original",
18
+ "Resize seed from height": "Normally, changing the resolution will completely change an image, even when using the same seed. If you generated an image with a particular seed and then changed the resolution, put the original resolution here to get an image that more closely resemles the original",
19
+ "Steps": "How many times to improve the generated image iteratively; higher values take longer; very low values can produce bad results",
20
+ //"ddim_eta": "";
21
+ //"n_batch": "",
22
+ //"make_grid": "",
23
+ //"grid_rows": "",
24
+ //"save_settings": "",
25
+ //"save_samples": "",
26
+ "Batch name": "output images will be placed in a folder with this name, inside of the img2img output folder",
27
+ "Pix2Pix img CFG schedule": "*Only in use with pix2pix checkpoints!*",
28
+ "Filename format": "specify the format of the filename for output images",
29
+ "Seed behavior": "defines the seed behavior that is used for animations",
30
+ "iter": "the seed value will increment by 1 for each subsequent frame of the animation",
31
+ "fixed": "the seed will remain fixed across all frames of animation",
32
+ "random": "a random seed will be used on each frame of the animation",
33
+ "schedule": "specify your own seed schedule (found on the Keyframes page)",
34
+
35
+ //Keyframes
36
+ "Animation mode": "selects the type of animation",
37
+ "2D": "only 2D motion parameters will be used, but this mode uses the least amount of VRAM. You can optionally enable flip_2d_perspective to enable some psuedo-3d animation parameters while in 2D mode.",
38
+ "3D": "enables all 3D motion parameters.",
39
+ "Video Input": "will ignore all motion parameters and attempt to reference a video loaded into the runtime, specified by the video_init_path. Max_frames is ignored during video_input mode, and instead, follows the number of frames pulled from the video’s length. Resume_from_timestring is NOT available with Video_Input mode.",
40
+ "Max frames": "the maximum number of output images to be created",
41
+ "Border": "controls handling method of pixels to be generated when the image is smaller than the frame.",
42
+ "wrap": "pulls pixels from the opposite edge of the image",
43
+ "replicate": "repeats the edge of the pixels, and extends them. Animations with quick motion may yield lines where this border function was attempting to populate pixels into the empty space created.",
44
+ "Angle": "2D operator to rotate canvas clockwise/anticlockwise in degrees per frame",
45
+ "Zoom": "2D operator that scales the canvas size, multiplicatively. [static = 1.0]",
46
+ "Translation X": "2D & 3D operator to move canvas left/right in pixels per frame",
47
+ "Translation Y": "2D & 3D operator to move canvas up/down in pixels per frame",
48
+ "Translation Z": "3D operator to move canvas towards/away from view [speed set by FOV]",
49
+ "Rotation 3D X": "3D operator to tilt canvas up/down in degrees per frame",
50
+ "Rotation 3D Y": "3D operator to pan canvas left/right in degrees per frame",
51
+ "Rotation 3D Z": "3D operator to roll canvas clockwise/anticlockwise",
52
+ "Enable perspective flip": "enables 2D mode functions to simulate faux 3D movement",
53
+ "Perspective flip theta": "the roll effect angle",
54
+ "Perspective flip phi": "the tilt effect angle",
55
+ "Perspective flip gamma": "the pan effect angle",
56
+ "Perspective flip fv": "the 2D vanishing point of perspective (recommended range 30-160)",
57
+ "Noise schedule": "amount of graininess to add per frame for diffusion diversity",
58
+ "Strength schedule": "amount of presence of previous frame to influence next frame, also controls steps in the following formula [steps - (strength_schedule * steps)]",
59
+ "Sampler schedule": "controls which sampler to use at a specific scheduled frame",
60
+ "Contrast schedule": "adjusts the overall contrast per frame [default neutral at 1.0]",
61
+ "CFG scale schedule": "how closely the image should conform to the prompt. Lower values produce more creative results. (recommended range 5-15)",
62
+ "FOV schedule": "adjusts the scale at which the canvas is moved in 3D by the translation_z value. [maximum range -180 to +180, with 0 being undefined. Values closer to 180 will make the image have less depth, while values closer to 0 will allow more depth]",
63
+ //"near_schedule": "",
64
+ //"far_schedule": "",
65
+ "Seed schedule": "allows you to specify seeds at a specific schedule, if seed_behavior is set to schedule.",
66
+ "Color coherence": "The color coherence will attempt to sample the overall pixel color information, and trend those values analyzed in the first frame to be applied to future frames.",
67
+ // "None": "Disable color coherence",
68
+ "Match Frame 0 HSV": "HSV is a good method for balancing presence of vibrant colors, but may produce unrealistic results - (ie.blue apples)",
69
+ "Match Frame 0 LAB": "LAB is a more linear approach to mimic human perception of color space - a good default setting for most users.",
70
+ "Match Frame 0 RGB": "RGB is good for enforcing unbiased amounts of color in each red, green and blue channel - some images may yield colorized artifacts if sampling is too low.",
71
+ "Cadence": "A setting of 1 will cause every frame to receive diffusion in the sequence of image outputs. A setting of 2 will only diffuse on every other frame, yet motion will still be in effect. The output of images during the cadence sequence will be automatically blended, additively and saved to the specified drive. This may improve the illusion of coherence in some workflows as the content and context of an image will not change or diffuse during frames that were skipped. Higher values of 4-8 cadence will skip over a larger amount of frames and only diffuse the “Nth” frame as set by the diffusion_cadence value. This may produce more continuity in an animation, at the cost of little opportunity to add more diffused content. In extreme examples, motion within a frame will fail to produce diverse prompt context, and the space will be filled with lines or approximations of content - resulting in unexpected animation patterns and artifacts. Video Input & Interpolation modes are not affected by diffusion_cadence.",
72
+ "Noise type": "Selects the type of noise being added to each frame",
73
+ "uniform": "Uniform noise covers the entire frame. It somewhat flattens and sharpens the video over time, but may be good for cartoonish look. This is the old default setting.",
74
+ "perlin": "Perlin noise is a more natural looking noise. It is heterogeneous and less sharp than uniform noise, this way it is more likely that new details will appear in a more coherent way. This is the new default setting.",
75
+ "Perlin W": "The width of the Perlin sample. Lower values will make larger noise regions. Think of it as inverse brush stroke width. The greater this setting, the smaller details it will affect.",
76
+ "Perlin H": "The height of the Perlin sample. Lower values will make larger noise regions. Think of it as inverse brush stroke width. The greater this setting, the smaller details it will affect.",
77
+ "Perlin octaves": "The number of Perlin noise octaves, that is the count of P-noise iterations. Higher values will make the noise more soft and smoke-like, whereas lower values will make it look more organic and spotty. It is limited by 8 octaves as the resulting gain will run out of bounds.",
78
+ "Perlin persistence": "How much of noise from each octave is added on each iteration. Higher values will make it more straighter and sharper, while lower values will make it rounder and smoother. It is limited by 1.0 as the resulting gain fill the frame completely with noise.",
79
+ "Use depth warping": "enables instructions to warp an image dynamically in 3D mode only.",
80
+ "MiDaS weight": "sets a midpoint at which a depthmap is to be drawn: range [-1 to +1]",
81
+ "Padding mode": "instructs the handling of pixels outside the field of view as they come into the scene.",
82
+ //"border": "Border will attempt to use the edges of the canvas as the pixels to be drawn", //duplicate name as another property
83
+ "reflection": "reflection will attempt to approximate the image and tile/repeat pixels",
84
+ "zeros": "zeros will not add any new pixel information",
85
+ "sampling_mode": "choose from Bicubic, Bilinear or Nearest modes. (Recommended: Bicubic)",
86
+ "Save depth maps": "will output a greyscale depth map image alongside the output images.",
87
+
88
+ // Prompts
89
+ "Prompts": "prompts for your animation in a JSON format. Use --neg words to add 'words' as negative prompt",
90
+ "Prompts positive": "positive prompt to be appended to *all* prompts",
91
+ "Prompts negative": "negative prompt to be appended to *all* prompts. DON'T use --neg here!",
92
+
93
+ //Init
94
+ "Use init": "Diffuse the first frame based on an image, similar to img2img.",
95
+ "Strength": "Controls the strength of the diffusion on the init image. 0 = disabled",
96
+ "Strength 0 no init": "Set the strength to 0 automatically when no init image is used",
97
+ "Init image": "the path to your init image",
98
+ "Use mask": "Use a grayscale image as a mask on your init image. Whiter areas of the mask are areas that change more.",
99
+ "Use alpha as mask": "use the alpha channel of the init image as the mask",
100
+ "Mask file": "the path to your mask image",
101
+ "Invert mask": "Inverts the colors of the mask",
102
+ "Mask brightness adjust": "adjust the brightness of the mask. Should be a positive number, with 1.0 meaning no adjustment.",
103
+ "Mask contrast adjust": "adjust the brightness of the mask. Should be a positive number, with 1.0 meaning no adjustment.",
104
+ "overlay mask": "Overlay the masked image at the end of the generation so it does not get degraded by encoding and decoding",
105
+ "Mask overlay blur": "Blur edges of final overlay mask, if used. Minimum = 0 (no blur)",
106
+ "Video init path": "the directory \/ URL at which your video file is located for Video Input mode only",
107
+ "Extract nth frame": "during the run sequence, only frames specified by this value will be extracted, saved, and diffused upon. A value of 1 indicates that every frame is to be accounted for. Values of 2 will use every other frame for the sequence. Higher values will skip that number of frames respectively.",
108
+ "Extract from frame":"start extracting the input video only from this frame number",
109
+ "Extract to frame": "stop the extraction of the video at this frame number. -1 for no limits",
110
+ "Overwrite extracted frames": "when enabled, will re-extract video frames each run. When using video_input mode, the run will be instructed to write video frames to the drive. If you’ve already populated the frames needed, uncheck this box to skip past redundant extraction, and immediately start the render. If you have not extracted frames, you must run at least once with this box checked to write the necessary frames.",
111
+ "Use mask video": "video_input mode only, enables the extraction and use of a separate video file intended for use as a mask. White areas of the extracted video frames will not be affected by diffusion, while black areas will be fully effected. Lighter/darker areas are affected dynamically.",
112
+ "Video mask path": "the directory in which your mask video is located.",
113
+ "Interpolate key frames": "selects whether to ignore prompt schedule or _x_frames.",
114
+ "Interpolate x frames": "the number of frames to transition thru between prompts (when interpolate_key_frames = true, then the numbers in front of the animation prompts will dynamically guide the images based on their value. If set to false, will ignore the prompt numbers and force interpole_x_frames value regardless of prompt number)",
115
+ "Resume from timestring": "instructs the run to start from a specified point",
116
+ "Resume timestring": "the required timestamp to reference when resuming. Currently only available in 2D & 3D mode, the timestamp is saved as the settings .txt file name as well as images produced during your previous run. The format follows: yyyymmddhhmmss - a timestamp of when the run was started to diffuse.",
117
+
118
+ //Video Output
119
+ "Skip video for run all": "when checked, do not output a video",
120
+ "Make GIF": "create a gif in addition to .mp4 file. supports up to 30 fps, will self-disable at higher fps values",
121
+ "Upscale":"upscale the images of the next run once it's finished + make a video out of them",
122
+ "Upscale model":"model of the upscaler to use. 'realesr-animevideov3' is much faster but yields smoother, less detailed results. the other models only do x4",
123
+ "Upscale factor":"how many times to upscale, actual options depend on the chosen upscale model",
124
+ "FPS": "The frames per second that the video will run at",
125
+ "Output format": "select the type of video file to output",
126
+ "PIL gif": "create an animated GIF",
127
+ "FFMPEG mp4": "create an MP4 video file",
128
+ "FFmpeg location": "the path to where ffmpeg is located. Leave at default 'ffmpeg' if ffmpeg is in your PATH!",
129
+ "FFmpeg crf": "controls quality where lower is better, less compressed. values: 0 to 51, default 17",
130
+ "FFmpeg preset": "controls how good the compression is, and the operation speed. If you're not in a rush keep it at 'veryslow'",
131
+ "Add soundtrack": "when this box is checked, and FFMPEG mp4 is selected as the output format, an audio file will be multiplexed with the video.",
132
+ "Soundtrack path": "the path\/ URL to an audio file to accompany the video",
133
+ "Use manual settings": "when this is unchecked, the video will automatically be created in the same output folder as the images. Check this box to specify different settings for the creation of the video, specified by the following options",
134
+ "Render steps": "render each step of diffusion as a separate frame",
135
+ "Max video frames": "the maximum number of frames to include in the video, when use_manual_settings is checked",
136
+ //"path_name_modifier": "",
137
+ "Image path": "the location of images to create the video from, when use_manual_settings is checked",
138
+ "MP4 path": "the output location of the mp4 file, when use_manual_settings is checked",
139
+ "Engine": "choose the frame interpolation engine and version",
140
+ "Interp X":"how many times to interpolate the source video. e.g source video fps of 12 and a value of x2 will yield a 24fps interpolated video",
141
+ "Slow-Mo X":"how many times to slow-down the video. *Naturally affects output fps as well",
142
+ "Keep Imgs": "delete or keep raw affected (interpolated/ upscaled depending on the UI section) png imgs",
143
+ "Interpolate an existing video":"This feature allows you to interpolate any video with a dedicated button. Video could be completly unrelated to deforum",
144
+ "In Frame Count": "uploaded video total frame count",
145
+ "In FPS":"uploaded video FPS",
146
+ "Interpolated Vid FPS":"calculated output-interpolated video FPS",
147
+ "In Res":"uploaded video resolution",
148
+ "Out Res":"output video resolution",
149
+
150
+ // Looper Args
151
+ // "use_looper": "",
152
+ "Enable guided images mode": "check this box to enable guided images mode",
153
+ "Images to use for keyframe guidance": "images you iterate over, you can do local or web paths (no single backslashes!)",
154
+ "Image strength schedule": "how much the image should look like the previou one and new image frame init. strength schedule might be better if this is higher, around .75 during the keyfames you want to switch on",
155
+ "Blend factor max": "blendFactor = blendFactorMax - blendFactorSlope * cos((frame % tweening_frames_schedule) / (tweening_frames_schedule / 2))",
156
+ "Blend factor slope": "blendFactor = blendFactorMax - blendFactorSlope * cos((frame % tweening_frames_schedule) / (tweening_frames_schedule / 2))",
157
+ "Tweening frames schedule": "number of the frames that we will blend between current imagined image and input frame image",
158
+ "Color correction factor": "how close to get to the colors of the input frame image/ the amount each frame during a tweening step to use the new images colors"
159
+ }
160
+
161
+
162
+ onUiUpdate(function(){
163
+ gradioApp().querySelectorAll('span, button, select, p').forEach(function(span){
164
+ tooltip = deforum_titles[span.textContent];
165
+
166
+ if(!tooltip){
167
+ tooltip = deforum_titles[span.value];
168
+ }
169
+
170
+ if(!tooltip){
171
+ for (const c of span.classList) {
172
+ if (c in deforum_titles) {
173
+ tooltip = deforum_titles[c];
174
+ break;
175
+ }
176
+ }
177
+ }
178
+
179
+ if(tooltip){
180
+ span.title = tooltip;
181
+ }
182
+ })
183
+
184
+ gradioApp().querySelectorAll('select').forEach(function(select){
185
+ if (select.onchange != null) return;
186
+
187
+ select.onchange = function(){
188
+ select.title = deforum_titles[select.value] || "";
189
+ }
190
+ })
191
+ })
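The two 'Blend factor' tooltips above quote the same guided-images formula. Below is a minimal sketch of that schedule, assuming `blendFactorMax`, `blendFactorSlope` and `tweening_frames_schedule` are the scalars the tooltips name; the default values are made up for illustration and are not taken from the extension's code:

```python
# Sketch of the blend-factor schedule quoted in the tooltips; values are illustrative.
import math

def blend_factor(frame: int, tweening_frames_schedule: int,
                 blend_factor_max: float = 0.35,
                 blend_factor_slope: float = 0.25) -> float:
    # blendFactor = blendFactorMax - blendFactorSlope * cos((frame % tweening) / (tweening / 2))
    phase = (frame % tweening_frames_schedule) / (tweening_frames_schedule / 2)
    return blend_factor_max - blend_factor_slope * math.cos(phase)

for f in range(6):
    print(f, round(blend_factor(f, tweening_frames_schedule=20), 3))
```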
extensions/deforum/javascript/deforum.js ADDED
@@ -0,0 +1,21 @@
+ function submit_deforum(){
+     // alert('Hello, Deforum!')
+     rememberGallerySelection('deforum_gallery')
+     showSubmitButtons('deforum', false)
+
+     var id = randomId()
+     requestProgress(id, gradioApp().getElementById('deforum_gallery_container'), gradioApp().getElementById('deforum_gallery'), function(){
+         showSubmitButtons('deforum', true)
+     })
+
+     var res = create_submit_args(arguments)
+
+     res[0] = id
+     // res[1] = get_tab_index('deforum')
+
+     return res
+ }
+
+ onUiUpdate(function(){
+     check_gallery('deforum_gallery')
+ })
extensions/deforum/requirements.txt ADDED
@@ -0,0 +1,7 @@
+ numexpr
+ matplotlib
+ pandas
+ av
+ pims
+ imageio_ffmpeg
+ rich
extensions/deforum/scripts/deforum.py ADDED
@@ -0,0 +1,318 @@
1
+ # Detach 'deforum_helpers' from 'scripts' to prevent "No module named 'scripts.deforum_helpers'" error
2
+ # which causes Deforum's tab not to show up in some cases when you might have broken the environment with webui package updates
3
+ import sys, os, shutil
4
+
5
+ basedirs = [os.getcwd()]
6
+ if 'google.colab' in sys.modules:
7
+ basedirs.append('/content/gdrive/MyDrive/sd/stable-diffusion-webui') #hardcode as TheLastBen's colab seems to be the primal source
8
+
9
+ for basedir in basedirs:
10
+ deforum_paths_to_ensure = [basedir + '/extensions/deforum-for-automatic1111-webui/scripts', basedir + '/extensions/sd-webui-controlnet', basedir + '/extensions/deforum/scripts', basedir + '/scripts/deforum_helpers/src', basedir + '/extensions/deforum/scripts/deforum_helpers/src', basedir +'/extensions/deforum-for-automatic1111-webui/scripts/deforum_helpers/src',basedir]
11
+
12
+ for deforum_scripts_path_fix in deforum_paths_to_ensure:
13
+ if not deforum_scripts_path_fix in sys.path:
14
+ sys.path.extend([deforum_scripts_path_fix])
15
+
16
+ # Main deforum stuff
17
+ import deforum_helpers.args as deforum_args
18
+ import deforum_helpers.settings as deforum_settings
19
+ from deforum_helpers.save_images import dump_frames_cache, reset_frames_cache
20
+ from deforum_helpers.frame_interpolation import process_video_interpolation
21
+
22
+ import modules.scripts as wscripts
23
+ from modules import script_callbacks
24
+ import gradio as gr
25
+ import json
26
+
27
+ from modules.processing import Processed, StableDiffusionProcessingImg2Img, process_images
28
+ from PIL import Image
29
+ from deforum_helpers.video_audio_utilities import ffmpeg_stitch_video, make_gifski_gif
30
+ from deforum_helpers.upscaling import make_upscale_v2
31
+ import gc
32
+ import torch
33
+ from webui import wrap_gradio_gpu_call
34
+ import modules.shared as shared
35
+ from modules.shared import opts, cmd_opts, state
36
+ from modules.ui import create_output_panel, plaintext_to_html, wrap_gradio_call
37
+ from types import SimpleNamespace
38
+
39
+ def run_deforum(*args, **kwargs):
40
+ args_dict = {deforum_args.component_names[i]: args[i+2] for i in range(0, len(deforum_args.component_names))}
41
+ p = StableDiffusionProcessingImg2Img(
42
+ sd_model=shared.sd_model,
43
+ outpath_samples = opts.outdir_samples or opts.outdir_img2img_samples,
44
+ outpath_grids = opts.outdir_grids or opts.outdir_img2img_grids,
45
+ #we'll setup the rest later
46
+ )
47
+
48
+ print("\033[4;33mDeforum extension for auto1111 webui, v2.2b\033[0m")
49
+ args_dict['self'] = None
50
+ args_dict['p'] = p
51
+
52
+ root, args, anim_args, video_args, parseq_args, loop_args, controlnet_args = deforum_args.process_args(args_dict)
53
+ root.clipseg_model = None
54
+ root.initial_clipskip = opts.data["CLIP_stop_at_last_layers"]
55
+ root.basedirs = basedirs
56
+
57
+ for basedir in basedirs:
58
+ sys.path.extend([
59
+ basedir + '/scripts/deforum_helpers/src',
60
+ basedir + '/extensions/deforum/scripts/deforum_helpers/src',
61
+ basedir + '/extensions/deforum-for-automatic1111-webui/scripts/deforum_helpers/src',
62
+ ])
63
+
64
+ # clean up unused memory
65
+ reset_frames_cache(root)
66
+ gc.collect()
67
+ torch.cuda.empty_cache()
68
+
69
+ from deforum_helpers.render import render_animation
70
+ from deforum_helpers.render_modes import render_input_video, render_animation_with_video_mask, render_interpolation
71
+
72
+ tqdm_backup = shared.total_tqdm
73
+ shared.total_tqdm = deforum_settings.DeforumTQDM(args, anim_args, parseq_args)
74
+ try:
75
+ # dispatch to appropriate renderer
76
+ if anim_args.animation_mode == '2D' or anim_args.animation_mode == '3D':
77
+ if anim_args.use_mask_video:
78
+ render_animation_with_video_mask(args, anim_args, video_args, parseq_args, loop_args, controlnet_args, root.animation_prompts, root) # allow mask video without an input video
79
+ else:
80
+ render_animation(args, anim_args, video_args, parseq_args, loop_args, controlnet_args, root.animation_prompts, root)
81
+ elif anim_args.animation_mode == 'Video Input':
82
+ render_input_video(args, anim_args, video_args, parseq_args, loop_args, controlnet_args, root.animation_prompts, root)#TODO: prettify code
83
+ elif anim_args.animation_mode == 'Interpolation':
84
+ render_interpolation(args, anim_args, video_args, parseq_args, loop_args, controlnet_args, root.animation_prompts, root)
85
+ else:
86
+ print('Other modes are not available yet!')
87
+ finally:
88
+ shared.total_tqdm = tqdm_backup
89
+ opts.data["CLIP_stop_at_last_layers"] = root.initial_clipskip
90
+
91
+ if video_args.store_frames_in_ram:
92
+ dump_frames_cache(root)
93
+
94
+ from base64 import b64encode
95
+
96
+ real_audio_track = None
97
+ if video_args.add_soundtrack != 'None':
98
+ real_audio_track = anim_args.video_init_path if video_args.add_soundtrack == 'Init Video' else video_args.soundtrack_path
99
+
100
+ # Delete folder with duplicated imgs from OS temp folder
101
+ shutil.rmtree(root.tmp_deforum_run_duplicated_folder, ignore_errors=True)
102
+
103
+ # Decide whether or not we need to try and frame interpolate laters
104
+ need_to_frame_interpolate = False
105
+ if video_args.frame_interpolation_engine != "None" and not video_args.skip_video_for_run_all and not video_args.store_frames_in_ram:
106
+ need_to_frame_interpolate = True
107
+
108
+ if video_args.skip_video_for_run_all:
109
+ print('Skipping video creation, uncheck skip_video_for_run_all if you want to run it')
110
+ else:
111
+ import subprocess
112
+
113
+ path_name_modifier = video_args.path_name_modifier
114
+ if video_args.render_steps: # render steps from a single image
115
+ fname = f"{path_name_modifier}_%05d.png"
116
+ all_step_dirs = [os.path.join(args.outdir, d) for d in os.listdir(args.outdir) if os.path.isdir(os.path.join(args.outdir,d))]
117
+ newest_dir = max(all_step_dirs, key=os.path.getmtime)
118
+ image_path = os.path.join(newest_dir, fname)
119
+ print(f"Reading images from {image_path}")
120
+ mp4_path = os.path.join(newest_dir, f"{args.timestring}_{path_name_modifier}.mp4")
121
+ max_video_frames = args.steps
122
+ else: # render images for a video
123
+ image_path = os.path.join(args.outdir, f"{args.timestring}_%05d.png")
124
+ mp4_path = os.path.join(args.outdir, f"{args.timestring}.mp4")
125
+ max_video_frames = anim_args.max_frames
126
+
127
+ exclude_keys = deforum_settings.get_keys_to_exclude('video')
128
+ video_settings_filename = os.path.join(args.outdir, f"{args.timestring}_video-settings.txt")
129
+ with open(video_settings_filename, "w+", encoding="utf-8") as f:
130
+ s = {}
131
+ for key, value in dict(video_args.__dict__).items():
132
+ if key not in exclude_keys:
133
+ s[key] = value
134
+ json.dump(s, f, ensure_ascii=False, indent=4)
135
+
136
+ # Stitch video using ffmpeg!
137
+ try:
138
+ ffmpeg_stitch_video(ffmpeg_location=video_args.ffmpeg_location, fps=video_args.fps, outmp4_path=mp4_path, stitch_from_frame=0, stitch_to_frame=max_video_frames, imgs_path=image_path, add_soundtrack=video_args.add_soundtrack, audio_path=real_audio_track, crf=video_args.ffmpeg_crf, preset=video_args.ffmpeg_preset)
139
+ mp4 = open(mp4_path,'rb').read()
140
+ data_url = "data:video/mp4;base64," + b64encode(mp4).decode()
141
+ deforum_args.i1_store = f'<p style=\"font-weight:bold;margin-bottom:0em\">Deforum v0.5-webui-beta</p><video controls loop><source src="{data_url}" type="video/mp4"></video>'
142
+ except Exception as e:
143
+ if need_to_frame_interpolate:
144
+ print(f"FFMPEG DID NOT STITCH ANY VIDEO. However, you requested to frame interpolate - so we will continue to frame interpolation, but you'll be left only with the interpolated frames and not a video, since ffmpeg couldn't run. Original ffmpeg error: {e}")
145
+ else:
146
+ print(f"** FFMPEG DID NOT STITCH ANY VIDEO ** Error: {e}")
147
+ pass
148
+
149
+ if root.initial_info is None:
150
+ root.initial_info = "An error has occurred and nothing has been generated!"
151
+ root.initial_info += "\nPlease, report the bug to https://github.com/deforum-art/deforum-for-automatic1111-webui/issues"
152
+ import numpy as np
153
+ a = np.random.rand(args.W, args.H, 3)*255
154
+ root.first_frame = Image.fromarray(a.astype('uint8')).convert('RGB')
155
+ root.initial_seed = 6934
156
+ # FRAME INTERPOLATION TIME
157
+ if need_to_frame_interpolate:
158
+ print(f"Got a request to *frame interpolate* using {video_args.frame_interpolation_engine}")
159
+ process_video_interpolation(frame_interpolation_engine=video_args.frame_interpolation_engine, frame_interpolation_x_amount=video_args.frame_interpolation_x_amount,frame_interpolation_slow_mo_enabled=video_args.frame_interpolation_slow_mo_enabled, frame_interpolation_slow_mo_amount=video_args.frame_interpolation_slow_mo_amount, orig_vid_fps=video_args.fps, deforum_models_path=root.models_path, real_audio_track=real_audio_track, raw_output_imgs_path=args.outdir, img_batch_id=args.timestring, ffmpeg_location=video_args.ffmpeg_location, ffmpeg_crf=video_args.ffmpeg_crf, ffmpeg_preset=video_args.ffmpeg_preset, keep_interp_imgs=video_args.frame_interpolation_keep_imgs, orig_vid_name=None, resolution=None)
160
+
161
+ if video_args.make_gif and not video_args.skip_video_for_run_all and not video_args.store_frames_in_ram:
162
+ make_gifski_gif(imgs_raw_path = args.outdir, imgs_batch_id = args.timestring, fps = video_args.fps, models_folder = root.models_path, current_user_os = root.current_user_os)
163
+
164
+ # Upscale video once generation is done:
165
+ if video_args.r_upscale_video and not video_args.skip_video_for_run_all and not video_args.store_frames_in_ram:
166
+
167
+ # out mp4 path is defined in make_upscale func
168
+ make_upscale_v2(upscale_factor = video_args.r_upscale_factor, upscale_model = video_args.r_upscale_model, keep_imgs = video_args.r_upscale_keep_imgs, imgs_raw_path = args.outdir, imgs_batch_id = args.timestring, fps = video_args.fps, deforum_models_path = root.models_path, current_user_os = root.current_user_os, ffmpeg_location=video_args.ffmpeg_location, stitch_from_frame=0, stitch_to_frame=max_video_frames, ffmpeg_crf=video_args.ffmpeg_crf, ffmpeg_preset=video_args.ffmpeg_preset, add_soundtrack = video_args.add_soundtrack ,audio_path=real_audio_track)
169
+
170
+ root.initial_info += "\n The animation is stored in " + args.outdir
171
+ root.initial_info += "\n Timestring = " + args.timestring + '\n'
172
+ root.initial_info += "Only the first frame is shown in webui not to clutter the memory"
173
+ reset_frames_cache(root) # cleanup the RAM in any case
174
+ processed = Processed(p, [root.first_frame], root.initial_seed, root.initial_info)
175
+
176
+ if processed is None:
177
+ processed = process_images(p)
178
+
179
+ shared.total_tqdm.clear()
180
+
181
+ generation_info_js = processed.js()
182
+ if opts.samples_log_stdout:
183
+ print(generation_info_js)
184
+
185
+ if opts.do_not_show_images:
186
+ processed.images = []
187
+
188
+ return processed.images, generation_info_js, plaintext_to_html(processed.info), plaintext_to_html('')
189
+
190
+ def on_ui_tabs():
191
+ with gr.Blocks(analytics_enabled=False) as deforum_interface:
192
+ components = {}
193
+ dummy_component = gr.Label(visible=False)
194
+ with gr.Row(elem_id='deforum_progress_row').style(equal_height=False):
195
+ with gr.Column(scale=1, variant='panel'):
196
+ components = deforum_args.setup_deforum_setting_dictionary(None, True, True)
197
+
198
+ with gr.Column(scale=1):
199
+ with gr.Row():
200
+ btn = gr.Button("Click here after the generation to show the video")
201
+ components['btn'] = btn
202
+ close_btn = gr.Button("Close the video", visible=False)
203
+ with gr.Row():
204
+ i1 = gr.HTML(deforum_args.i1_store, elem_id='deforum_header')
205
+ components['i1'] = i1
206
+ # Show video
207
+ def show_vid():
208
+ return {
209
+ i1: gr.update(value=deforum_args.i1_store, visible=True),
210
+ close_btn: gr.update(visible=True),
211
+ btn: gr.update(value="Update the video", visible=True),
212
+ }
213
+
214
+ btn.click(
215
+ show_vid,
216
+ [],
217
+ [i1, close_btn, btn],
218
+ )
219
+ # Close video
220
+ def close_vid():
221
+ return {
222
+ i1: gr.update(value=deforum_args.i1_store_backup, visible=True),
223
+ close_btn: gr.update(visible=False),
224
+ btn: gr.update(value="Click here after the generation to show the video", visible=True),
225
+ }
226
+
227
+ close_btn.click(
228
+ close_vid,
229
+ [],
230
+ [i1, close_btn, btn],
231
+ )
232
+ id_part = 'deforum'
233
+ with gr.Row(elem_id=f"{id_part}_generate_box"):
234
+ skip = gr.Button('Skip', elem_id=f"{id_part}_skip", visible=False)
235
+ interrupt = gr.Button('Interrupt', elem_id=f"{id_part}_interrupt", visible=True)
236
+ submit = gr.Button('Generate', elem_id=f"{id_part}_generate", variant='primary')
237
+
238
+ skip.click(
239
+ fn=lambda: state.skip(),
240
+ inputs=[],
241
+ outputs=[],
242
+ )
243
+
244
+ interrupt.click(
245
+ fn=lambda: state.interrupt(),
246
+ inputs=[],
247
+ outputs=[],
248
+ )
249
+
250
+ deforum_gallery, generation_info, html_info, html_log = create_output_panel("deforum", opts.outdir_img2img_samples)
251
+
252
+ gr.HTML("<p>* Paths can be relative to webui folder OR full - absolute </p>")
253
+ with gr.Row():
254
+ settings_path = gr.Textbox("deforum_settings.txt", elem_id='deforum_settings_path', label="General Settings File")
255
+ #reuse_latest_settings_btn = gr.Button('Reuse Latest', elem_id='deforum_reuse_latest_settings_btn')#TODO
256
+ with gr.Row():
257
+ save_settings_btn = gr.Button('Save Settings', elem_id='deforum_save_settings_btn')
258
+ load_settings_btn = gr.Button('Load Settings', elem_id='deforum_load_settings_btn')
259
+ with gr.Row():
260
+ video_settings_path = gr.Textbox("deforum_video-settings.txt", elem_id='deforum_video_settings_path', label="Video Settings File")
261
+ #reuse_latest_video_settings_btn = gr.Button('Reuse Latest', elem_id='deforum_reuse_latest_video_settings_btn')#TODO
262
+ with gr.Row():
263
+ save_video_settings_btn = gr.Button('Save Video Settings', elem_id='deforum_save_video_settings_btn')
264
+ load_video_settings_btn = gr.Button('Load Video Settings', elem_id='deforum_load_video_settings_btn')
265
+
266
+ # components['prompts'].visible = False#hide prompts for the time being
267
+ #TODO clean up the code
268
+ components['save_sample_per_step'].visible = False
269
+ components['show_sample_per_step'].visible = False
270
+ components['display_samples'].visible = False
271
+
272
+ component_list = [components[name] for name in deforum_args.component_names]
273
+
274
+ submit.click(
275
+ fn=wrap_gradio_gpu_call(run_deforum, extra_outputs=[None, '', '']),
276
+ _js="submit_deforum",
277
+ inputs=[dummy_component, dummy_component] + component_list,
278
+ outputs=[
279
+ deforum_gallery,
280
+ generation_info,
281
+ html_info,
282
+ html_log,
283
+ ],
284
+ )
285
+
286
+ settings_component_list = [components[name] for name in deforum_args.settings_component_names]
287
+ video_settings_component_list = [components[name] for name in deforum_args.video_args_names]
288
+ stuff = gr.HTML("") # wrap gradio call garbage
289
+ stuff.visible = False
290
+
291
+ save_settings_btn.click(
292
+ fn=wrap_gradio_call(deforum_settings.save_settings),
293
+ inputs=[settings_path] + settings_component_list,
294
+ outputs=[stuff],
295
+ )
296
+
297
+ load_settings_btn.click(
298
+ fn=wrap_gradio_call(deforum_settings.load_settings),
299
+ inputs=[settings_path]+ settings_component_list,
300
+ outputs=settings_component_list + [stuff],
301
+ )
302
+
303
+ save_video_settings_btn.click(
304
+ fn=wrap_gradio_call(deforum_settings.save_video_settings),
305
+ inputs=[video_settings_path] + video_settings_component_list,
306
+ outputs=[stuff],
307
+ )
308
+
309
+ load_video_settings_btn.click(
310
+ fn=wrap_gradio_call(deforum_settings.load_video_settings),
311
+ inputs=[video_settings_path] + video_settings_component_list,
312
+ outputs=video_settings_component_list + [stuff],
313
+ )
314
+
315
+
316
+ return [(deforum_interface, "Deforum", "deforum_interface")]
317
+
318
+ script_callbacks.on_ui_tabs(on_ui_tabs)
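run_deforum above delegates frame stitching to ffmpeg_stitch_video from video_audio_utilities.py, which falls outside this 50-file view. As a rough, hedged sketch of what that step amounts to (an ffmpeg call over the `{timestring}_%05d.png` frame pattern with the configured CRF/preset; the flags and paths below are illustrative, not the extension's actual helper):

```python
# Simplified illustration of stitching numbered frames into an mp4 with ffmpeg.
# The real helper also handles soundtracks, frame ranges and the GIF/upscale paths.
import subprocess

def stitch_frames(imgs_pattern: str, out_mp4: str, fps: float,
                  crf: int = 17, preset: str = "veryslow",
                  ffmpeg_location: str = "ffmpeg") -> None:
    cmd = [
        ffmpeg_location, "-y",
        "-r", str(fps),          # frame rate of the image sequence
        "-i", imgs_pattern,      # e.g. "outputs/20230101120000_%05d.png"
        "-c:v", "libx264",
        "-crf", str(crf),        # lower = better quality, larger file
        "-preset", preset,
        "-pix_fmt", "yuv420p",   # broad player compatibility
        out_mp4,
    ]
    subprocess.run(cmd, check=True)
```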
extensions/deforum/scripts/deforum_helpers/__init__.py ADDED
@@ -0,0 +1,7 @@
+ """
+ from .save_images import save_samples, get_output_folder
+ from .depth import DepthModel
+ from .prompt import sanitize
+ from .animation import construct_RotationMatrixHomogenous, getRotationMatrixManual, getPoints_for_PerspectiveTranformEstimation, warpMatrix, anim_frame_warp_2d, anim_frame_warp_3d
+ from .generate import add_noise, load_img, load_mask_latent, prepare_mask
+ """
extensions/deforum/scripts/deforum_helpers/animation.py ADDED
@@ -0,0 +1,258 @@
1
+ import numpy as np
2
+ import cv2
3
+ from functools import reduce
4
+ import math
5
+ import py3d_tools as p3d
6
+ import torch
7
+ from einops import rearrange
8
+ from .prompt import check_is_number
9
+
10
+ # Webui
11
+ from modules.shared import state
12
+
13
+ def sample_from_cv2(sample: np.ndarray) -> torch.Tensor:
14
+ sample = ((sample.astype(float) / 255.0) * 2) - 1
15
+ sample = sample[None].transpose(0, 3, 1, 2).astype(np.float16)
16
+ sample = torch.from_numpy(sample)
17
+ return sample
18
+
19
+ def sample_to_cv2(sample: torch.Tensor, type=np.uint8) -> np.ndarray:
20
+ sample_f32 = rearrange(sample.squeeze().cpu().numpy(), "c h w -> h w c").astype(np.float32)
21
+ sample_f32 = ((sample_f32 * 0.5) + 0.5).clip(0, 1)
22
+ sample_int8 = (sample_f32 * 255)
23
+ return sample_int8.astype(type)
24
+
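A quick round-trip sanity check for the two converters above (assumes numpy, torch and einops are installed and the helpers are in scope):

import numpy as np

img = (np.random.rand(64, 64, 3) * 255).astype(np.uint8)   # cv2-style H x W x C uint8 frame
t = sample_from_cv2(img)        # 1 x 3 x 64 x 64 float16 tensor scaled to [-1, 1]
back = sample_to_cv2(t)         # back to H x W x C uint8
assert back.shape == img.shape and back.dtype == np.uint8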
25
+ def construct_RotationMatrixHomogenous(rotation_angles):
26
+ assert(type(rotation_angles)==list and len(rotation_angles)==3)
27
+ RH = np.eye(4,4)
28
+ cv2.Rodrigues(np.array(rotation_angles), RH[0:3, 0:3])
29
+ return RH
30
+
31
+ # https://en.wikipedia.org/wiki/Rotation_matrix
32
+ def getRotationMatrixManual(rotation_angles):
33
+
34
+ rotation_angles = [np.deg2rad(x) for x in rotation_angles]
35
+
36
+ phi = rotation_angles[0] # around x
37
+ gamma = rotation_angles[1] # around y
38
+ theta = rotation_angles[2] # around z
39
+
40
+ # X rotation
41
+ Rphi = np.eye(4,4)
42
+ sp = np.sin(phi)
43
+ cp = np.cos(phi)
44
+ Rphi[1,1] = cp
45
+ Rphi[2,2] = Rphi[1,1]
46
+ Rphi[1,2] = -sp
47
+ Rphi[2,1] = sp
48
+
49
+ # Y rotation
50
+ Rgamma = np.eye(4,4)
51
+ sg = np.sin(gamma)
52
+ cg = np.cos(gamma)
53
+ Rgamma[0,0] = cg
54
+ Rgamma[2,2] = Rgamma[0,0]
55
+ Rgamma[0,2] = sg
56
+ Rgamma[2,0] = -sg
57
+
58
+ # Z rotation (in-image-plane)
59
+ Rtheta = np.eye(4,4)
60
+ st = np.sin(theta)
61
+ ct = np.cos(theta)
62
+ Rtheta[0,0] = ct
63
+ Rtheta[1,1] = Rtheta[0,0]
64
+ Rtheta[0,1] = -st
65
+ Rtheta[1,0] = st
66
+
67
+ R = reduce(lambda x,y : np.matmul(x,y), [Rphi, Rgamma, Rtheta])
68
+
69
+ return R
70
+
71
+ def getPoints_for_PerspectiveTranformEstimation(ptsIn, ptsOut, W, H, sidelength):
72
+
73
+ ptsIn2D = ptsIn[0,:]
74
+ ptsOut2D = ptsOut[0,:]
75
+ ptsOut2Dlist = []
76
+ ptsIn2Dlist = []
77
+
78
+ for i in range(0,4):
79
+ ptsOut2Dlist.append([ptsOut2D[i,0], ptsOut2D[i,1]])
80
+ ptsIn2Dlist.append([ptsIn2D[i,0], ptsIn2D[i,1]])
81
+
82
+ pin = np.array(ptsIn2Dlist) + [W/2.,H/2.]
83
+ pout = (np.array(ptsOut2Dlist) + [1.,1.]) * (0.5*sidelength)
84
+ pin = pin.astype(np.float32)
85
+ pout = pout.astype(np.float32)
86
+
87
+ return pin, pout
88
+
89
+
90
+ def warpMatrix(W, H, theta, phi, gamma, scale, fV):
91
+
92
+ # M is to be estimated
93
+ M = np.eye(4, 4)
94
+
95
+ fVhalf = np.deg2rad(fV/2.)
96
+ d = np.sqrt(W*W+H*H)
97
+ sideLength = scale*d/np.cos(fVhalf)
98
+ h = d/(2.0*np.sin(fVhalf))
99
+ n = h-(d/2.0)
100
+ f = h+(d/2.0)
101
+
102
+ # Translation along Z-axis by -h
103
+ T = np.eye(4,4)
104
+ T[2,3] = -h
105
+
106
+ # Rotation matrices around x,y,z
107
+ R = getRotationMatrixManual([phi, gamma, theta])
108
+
109
+
110
+ # Projection Matrix
111
+ P = np.eye(4,4)
112
+ P[0,0] = 1.0/np.tan(fVhalf)
113
+ P[1,1] = P[0,0]
114
+ P[2,2] = -(f+n)/(f-n)
115
+ P[2,3] = -(2.0*f*n)/(f-n)
116
+ P[3,2] = -1.0
117
+
118
+ # pythonic matrix multiplication
119
+ F = reduce(lambda x,y : np.matmul(x,y), [P, T, R])
120
+
121
+ # shape should be 1,4,3 for ptsIn and ptsOut since perspectiveTransform() expects data in this way.
122
+ # In C++, this can be achieved by Mat ptsIn(1,4,CV_64FC3);
123
+ ptsIn = np.array([[
124
+ [-W/2., H/2., 0.],[ W/2., H/2., 0.],[ W/2.,-H/2., 0.],[-W/2.,-H/2., 0.]
125
+ ]])
126
+ ptsOut = np.array(np.zeros((ptsIn.shape), dtype=ptsIn.dtype))
127
+ ptsOut = cv2.perspectiveTransform(ptsIn, F)
128
+
129
+ ptsInPt2f, ptsOutPt2f = getPoints_for_PerspectiveTranformEstimation(ptsIn, ptsOut, W, H, sideLength)
130
+
131
+ # check float32 otherwise OpenCV throws an error
132
+ assert(ptsInPt2f.dtype == np.float32)
133
+ assert(ptsOutPt2f.dtype == np.float32)
134
+ M33 = cv2.getPerspectiveTransform(ptsInPt2f,ptsOutPt2f)
135
+
136
+ return M33, sideLength
137
+
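For illustration, the 3x3 matrix returned by warpMatrix can be fed straight into cv2.warpPerspective, which is what the perspective-flip helpers below do. A small sketch, assuming the function above is in scope:

import numpy as np
import cv2

frame = np.zeros((480, 640, 3), dtype=np.uint8)                 # H=480, W=640 dummy frame
M33, side_length = warpMatrix(640, 480, theta=0, phi=10, gamma=0, scale=1.0, fV=53)
warped = cv2.warpPerspective(frame, M33, (640, 480), borderMode=cv2.BORDER_REPLICATE)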
138
+ def get_flip_perspective_matrix(W, H, keys, frame_idx):
139
+ perspective_flip_theta = keys.perspective_flip_theta_series[frame_idx]
140
+ perspective_flip_phi = keys.perspective_flip_phi_series[frame_idx]
141
+ perspective_flip_gamma = keys.perspective_flip_gamma_series[frame_idx]
142
+ perspective_flip_fv = keys.perspective_flip_fv_series[frame_idx]
143
+ M, sl = warpMatrix(W, H, perspective_flip_theta, perspective_flip_phi, perspective_flip_gamma, 1., perspective_flip_fv)
144
+ post_trans_mat = np.float32([[1, 0, (W-sl)/2], [0, 1, (H-sl)/2]])
145
+ post_trans_mat = np.vstack([post_trans_mat, [0,0,1]])
146
+ bM = np.matmul(M, post_trans_mat)
147
+ return bM
148
+
149
+ def flip_3d_perspective(anim_args, prev_img_cv2, keys, frame_idx):
150
+ W, H = (prev_img_cv2.shape[1], prev_img_cv2.shape[0])
151
+ return cv2.warpPerspective(
152
+ prev_img_cv2,
153
+ get_flip_perspective_matrix(W, H, keys, frame_idx),
154
+ (W, H),
155
+ borderMode=cv2.BORDER_WRAP if anim_args.border == 'wrap' else cv2.BORDER_REPLICATE
156
+ )
157
+
158
+ def anim_frame_warp(prev_img_cv2, args, anim_args, keys, frame_idx, depth_model=None, depth=None, device='cuda', half_precision = False):
159
+
160
+ if anim_args.use_depth_warping:
161
+ if depth is None and depth_model is not None:
162
+ depth = depth_model.predict(prev_img_cv2, anim_args, half_precision)
163
+ else:
164
+ depth = None
165
+
166
+ if anim_args.animation_mode == '2D':
167
+ prev_img = anim_frame_warp_2d(prev_img_cv2, args, anim_args, keys, frame_idx)
168
+ else: # '3D'
169
+ prev_img = anim_frame_warp_3d(device, prev_img_cv2, depth, anim_args, keys, frame_idx)
170
+
171
+ return prev_img, depth
172
+
173
+ def anim_frame_warp_2d(prev_img_cv2, args, anim_args, keys, frame_idx):
174
+ angle = keys.angle_series[frame_idx]
175
+ zoom = keys.zoom_series[frame_idx]
176
+ translation_x = keys.translation_x_series[frame_idx]
177
+ translation_y = keys.translation_y_series[frame_idx]
178
+
179
+ center = (args.W // 2, args.H // 2)
180
+ trans_mat = np.float32([[1, 0, translation_x], [0, 1, translation_y]])
181
+ rot_mat = cv2.getRotationMatrix2D(center, angle, zoom)
182
+ trans_mat = np.vstack([trans_mat, [0,0,1]])
183
+ rot_mat = np.vstack([rot_mat, [0,0,1]])
184
+ if anim_args.enable_perspective_flip:
185
+ bM = get_flip_perspective_matrix(args.W, args.H, keys, frame_idx)
186
+ rot_mat = np.matmul(np.matmul(bM, rot_mat), trans_mat) # chain the three transforms; a 3-arg matmul would treat trans_mat as numpy's out= buffer
187
+ else:
188
+ rot_mat = np.matmul(rot_mat, trans_mat)
189
+ return cv2.warpPerspective(
190
+ prev_img_cv2,
191
+ rot_mat,
192
+ (prev_img_cv2.shape[1], prev_img_cv2.shape[0]),
193
+ borderMode=cv2.BORDER_WRAP if anim_args.border == 'wrap' else cv2.BORDER_REPLICATE
194
+ )
195
+
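The 2D path above composes a rotation/zoom about the frame center with a translation and applies the result as one 3x3 perspective warp. A standalone sketch of that composition (assumes numpy and OpenCV are installed; values are arbitrary, not Deforum defaults):

import numpy as np
import cv2

frame = np.zeros((480, 640, 3), dtype=np.uint8)
center = (640 // 2, 480 // 2)
rot = np.vstack([cv2.getRotationMatrix2D(center, 5.0, 1.02), [0, 0, 1]])   # 5 deg rotation, 2% zoom
trans = np.vstack([np.float32([[1, 0, 10], [0, 1, 0]]), [0, 0, 1]])        # 10 px shift to the right
warped = cv2.warpPerspective(frame, np.matmul(rot, trans), (640, 480), borderMode=cv2.BORDER_REPLICATE)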
196
+ def anim_frame_warp_3d(device, prev_img_cv2, depth, anim_args, keys, frame_idx):
197
+ TRANSLATION_SCALE = 1.0/200.0 # matches Disco
198
+ translate_xyz = [
199
+ -keys.translation_x_series[frame_idx] * TRANSLATION_SCALE,
200
+ keys.translation_y_series[frame_idx] * TRANSLATION_SCALE,
201
+ -keys.translation_z_series[frame_idx] * TRANSLATION_SCALE
202
+ ]
203
+ rotate_xyz = [
204
+ math.radians(keys.rotation_3d_x_series[frame_idx]),
205
+ math.radians(keys.rotation_3d_y_series[frame_idx]),
206
+ math.radians(keys.rotation_3d_z_series[frame_idx])
207
+ ]
208
+ if anim_args.enable_perspective_flip:
209
+ prev_img_cv2 = flip_3d_perspective(anim_args, prev_img_cv2, keys, frame_idx)
210
+ rot_mat = p3d.euler_angles_to_matrix(torch.tensor(rotate_xyz, device=device), "XYZ").unsqueeze(0)
211
+ result = transform_image_3d(device if not device.type.startswith('mps') else torch.device('cpu'), prev_img_cv2, depth, rot_mat, translate_xyz, anim_args, keys, frame_idx)
212
+ torch.cuda.empty_cache()
213
+ return result
214
+
215
+ def transform_image_3d(device, prev_img_cv2, depth_tensor, rot_mat, translate, anim_args, keys, frame_idx):
216
+ # adapted and optimized version of transform_image_3d from Disco Diffusion https://github.com/alembics/disco-diffusion
217
+ w, h = prev_img_cv2.shape[1], prev_img_cv2.shape[0]
218
+
219
+ aspect_ratio = float(w)/float(h)
220
+ near = keys.near_series[frame_idx]
221
+ far = keys.far_series[frame_idx]
222
+ fov_deg = keys.fov_series[frame_idx]
223
+ persp_cam_old = p3d.FoVPerspectiveCameras(near, far, aspect_ratio, fov=fov_deg, degrees=True, device=device)
224
+ persp_cam_new = p3d.FoVPerspectiveCameras(near, far, aspect_ratio, fov=fov_deg, degrees=True, R=rot_mat, T=torch.tensor([translate]), device=device)
225
+
226
+ # range of [-1,1] is important to torch grid_sample's padding handling
227
+ y,x = torch.meshgrid(torch.linspace(-1.,1.,h,dtype=torch.float32,device=device),torch.linspace(-1.,1.,w,dtype=torch.float32,device=device))
228
+ if depth_tensor is None:
229
+ z = torch.ones_like(x)
230
+ else:
231
+ z = torch.as_tensor(depth_tensor, dtype=torch.float32, device=device)
232
+ xyz_old_world = torch.stack((x.flatten(), y.flatten(), z.flatten()), dim=1)
233
+
234
+ xyz_old_cam_xy = persp_cam_old.get_full_projection_transform().transform_points(xyz_old_world)[:,0:2]
235
+ xyz_new_cam_xy = persp_cam_new.get_full_projection_transform().transform_points(xyz_old_world)[:,0:2]
236
+
237
+ offset_xy = xyz_new_cam_xy - xyz_old_cam_xy
238
+ # affine_grid theta param expects a batch of 2D mats. Each is 2x3 to do rotation+translation.
239
+ identity_2d_batch = torch.tensor([[1.,0.,0.],[0.,1.,0.]], device=device).unsqueeze(0)
240
+ # coords_2d will have shape (N,H,W,2).. which is also what grid_sample needs.
241
+ coords_2d = torch.nn.functional.affine_grid(identity_2d_batch, [1,1,h,w], align_corners=False)
242
+ offset_coords_2d = coords_2d - torch.reshape(offset_xy, (h,w,2)).unsqueeze(0)
243
+
244
+ image_tensor = rearrange(torch.from_numpy(prev_img_cv2.astype(np.float32)), 'h w c -> c h w').to(device)
245
+ new_image = torch.nn.functional.grid_sample(
246
+ image_tensor.add(1/512 - 0.0001).unsqueeze(0),
247
+ offset_coords_2d,
248
+ mode=anim_args.sampling_mode,
249
+ padding_mode=anim_args.padding_mode,
250
+ align_corners=False
251
+ )
252
+
253
+ # convert back to cv2 style numpy array
254
+ result = rearrange(
255
+ new_image.squeeze().clamp(0,255),
256
+ 'c h w -> h w c'
257
+ ).cpu().numpy().astype(prev_img_cv2.dtype)
258
+ return result
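The core of transform_image_3d above is torch's affine_grid/grid_sample pair: a per-pixel offset grid resamples the previous frame. A minimal illustration of that mechanism with an identity grid (assumes torch is installed; this is not the extension's own code path):

import torch
import torch.nn.functional as F

img = torch.rand(1, 3, 8, 8)                                  # N x C x H x W
theta = torch.tensor([[[1., 0., 0.], [0., 1., 0.]]])          # identity 2x3 affine, batch of 1
grid = F.affine_grid(theta, size=[1, 3, 8, 8], align_corners=False)
out = F.grid_sample(img, grid, mode='bilinear', padding_mode='border', align_corners=False)
assert torch.allclose(out, img, atol=1e-6)                    # identity grid leaves the image unchanged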
extensions/deforum/scripts/deforum_helpers/animation_key_frames.py ADDED
@@ -0,0 +1,106 @@
1
+ import re
2
+ import numpy as np
3
+ import numexpr
4
+ import pandas as pd
5
+ from .prompt import check_is_number
6
+
7
+ class DeformAnimKeys():
8
+ def __init__(self, anim_args):
9
+ self.angle_series = get_inbetweens(parse_key_frames(anim_args.angle), anim_args.max_frames)
10
+ self.zoom_series = get_inbetweens(parse_key_frames(anim_args.zoom), anim_args.max_frames)
11
+ self.translation_x_series = get_inbetweens(parse_key_frames(anim_args.translation_x), anim_args.max_frames)
12
+ self.translation_y_series = get_inbetweens(parse_key_frames(anim_args.translation_y), anim_args.max_frames)
13
+ self.translation_z_series = get_inbetweens(parse_key_frames(anim_args.translation_z), anim_args.max_frames)
14
+ self.rotation_3d_x_series = get_inbetweens(parse_key_frames(anim_args.rotation_3d_x), anim_args.max_frames)
15
+ self.rotation_3d_y_series = get_inbetweens(parse_key_frames(anim_args.rotation_3d_y), anim_args.max_frames)
16
+ self.rotation_3d_z_series = get_inbetweens(parse_key_frames(anim_args.rotation_3d_z), anim_args.max_frames)
17
+ self.perspective_flip_theta_series = get_inbetweens(parse_key_frames(anim_args.perspective_flip_theta), anim_args.max_frames)
18
+ self.perspective_flip_phi_series = get_inbetweens(parse_key_frames(anim_args.perspective_flip_phi), anim_args.max_frames)
19
+ self.perspective_flip_gamma_series = get_inbetweens(parse_key_frames(anim_args.perspective_flip_gamma), anim_args.max_frames)
20
+ self.perspective_flip_fv_series = get_inbetweens(parse_key_frames(anim_args.perspective_flip_fv), anim_args.max_frames)
21
+ self.noise_schedule_series = get_inbetweens(parse_key_frames(anim_args.noise_schedule), anim_args.max_frames)
22
+ self.strength_schedule_series = get_inbetweens(parse_key_frames(anim_args.strength_schedule), anim_args.max_frames)
23
+ self.contrast_schedule_series = get_inbetweens(parse_key_frames(anim_args.contrast_schedule), anim_args.max_frames)
24
+ self.cfg_scale_schedule_series = get_inbetweens(parse_key_frames(anim_args.cfg_scale_schedule), anim_args.max_frames)
25
+ self.pix2pix_img_cfg_scale_series = get_inbetweens(parse_key_frames(anim_args.pix2pix_img_cfg_scale_schedule), anim_args.max_frames)
26
+ self.subseed_schedule_series = get_inbetweens(parse_key_frames(anim_args.subseed_schedule), anim_args.max_frames)
27
+ self.subseed_strength_schedule_series = get_inbetweens(parse_key_frames(anim_args.subseed_strength_schedule), anim_args.max_frames)
28
+ self.checkpoint_schedule_series = get_inbetweens(parse_key_frames(anim_args.checkpoint_schedule), anim_args.max_frames, is_single_string = True)
29
+ self.steps_schedule_series = get_inbetweens(parse_key_frames(anim_args.steps_schedule), anim_args.max_frames)
30
+ self.seed_schedule_series = get_inbetweens(parse_key_frames(anim_args.seed_schedule), anim_args.max_frames)
31
+ self.sampler_schedule_series = get_inbetweens(parse_key_frames(anim_args.sampler_schedule), anim_args.max_frames, is_single_string = True)
32
+ self.clipskip_schedule_series = get_inbetweens(parse_key_frames(anim_args.clipskip_schedule), anim_args.max_frames)
33
+ self.mask_schedule_series = get_inbetweens(parse_key_frames(anim_args.mask_schedule), anim_args.max_frames, is_single_string = True)
34
+ self.noise_mask_schedule_series = get_inbetweens(parse_key_frames(anim_args.noise_mask_schedule), anim_args.max_frames, is_single_string = True)
35
+ self.kernel_schedule_series = get_inbetweens(parse_key_frames(anim_args.kernel_schedule), anim_args.max_frames)
36
+ self.sigma_schedule_series = get_inbetweens(parse_key_frames(anim_args.sigma_schedule), anim_args.max_frames)
37
+ self.amount_schedule_series = get_inbetweens(parse_key_frames(anim_args.amount_schedule), anim_args.max_frames)
38
+ self.threshold_schedule_series = get_inbetweens(parse_key_frames(anim_args.threshold_schedule), anim_args.max_frames)
39
+ self.fov_series = get_inbetweens(parse_key_frames(anim_args.fov_schedule), anim_args.max_frames)
40
+ self.near_series = get_inbetweens(parse_key_frames(anim_args.near_schedule), anim_args.max_frames)
41
+ self.far_series = get_inbetweens(parse_key_frames(anim_args.far_schedule), anim_args.max_frames)
42
+ self.hybrid_comp_alpha_schedule_series = get_inbetweens(parse_key_frames(anim_args.hybrid_comp_alpha_schedule), anim_args.max_frames)
43
+ self.hybrid_comp_mask_blend_alpha_schedule_series = get_inbetweens(parse_key_frames(anim_args.hybrid_comp_mask_blend_alpha_schedule), anim_args.max_frames)
44
+ self.hybrid_comp_mask_contrast_schedule_series = get_inbetweens(parse_key_frames(anim_args.hybrid_comp_mask_contrast_schedule), anim_args.max_frames)
45
+ self.hybrid_comp_mask_auto_contrast_cutoff_high_schedule_series = get_inbetweens(parse_key_frames(anim_args.hybrid_comp_mask_auto_contrast_cutoff_high_schedule), anim_args.max_frames)
46
+ self.hybrid_comp_mask_auto_contrast_cutoff_low_schedule_series = get_inbetweens(parse_key_frames(anim_args.hybrid_comp_mask_auto_contrast_cutoff_low_schedule), anim_args.max_frames)
47
+
48
+ class LooperAnimKeys():
49
+ def __init__(self, loop_args, anim_args):
50
+ self.use_looper = loop_args.use_looper
51
+ self.imagesToKeyframe = loop_args.init_images
52
+ self.image_strength_schedule_series = get_inbetweens(parse_key_frames(loop_args.image_strength_schedule), anim_args.max_frames)
53
+ self.blendFactorMax_series = get_inbetweens(parse_key_frames(loop_args.blendFactorMax), anim_args.max_frames)
54
+ self.blendFactorSlope_series = get_inbetweens(parse_key_frames(loop_args.blendFactorSlope), anim_args.max_frames)
55
+ self.tweening_frames_schedule_series = get_inbetweens(parse_key_frames(loop_args.tweening_frames_schedule), anim_args.max_frames)
56
+ self.color_correction_factor_series = get_inbetweens(parse_key_frames(loop_args.color_correction_factor), anim_args.max_frames)
57
+
58
+ def get_inbetweens(key_frames, max_frames, integer=False, interp_method='Linear', is_single_string = False):
59
+ key_frame_series = pd.Series([np.nan for a in range(max_frames)])
60
+ for i in range(0, max_frames):
61
+ if i in key_frames:
62
+ value = key_frames[i]
63
+ value_is_number = check_is_number(value)
64
+ # if it's only a number, leave the rest for the default interpolation
65
+ if value_is_number:
66
+ t = i
67
+ key_frame_series[i] = value
68
+ if not value_is_number:
69
+ t = i
70
+ if is_single_string:
71
+ if value.find("'") > -1:
72
+ value = value.replace("'","")
73
+ if value.find('"') > -1:
74
+ value = value.replace('"',"")
75
+ key_frame_series[i] = numexpr.evaluate(value) if not is_single_string else value # workaround for values formatted like 0:("I am test") //used for sampler schedules
76
+ key_frame_series = key_frame_series.astype(float) if not is_single_string else key_frame_series # as string
77
+
78
+ if interp_method == 'Cubic' and len(key_frames.items()) <= 3:
79
+ interp_method = 'Quadratic'
80
+ if interp_method == 'Quadratic' and len(key_frames.items()) <= 2:
81
+ interp_method = 'Linear'
82
+
83
+ key_frame_series[0] = key_frame_series[key_frame_series.first_valid_index()]
84
+ key_frame_series[max_frames-1] = key_frame_series[key_frame_series.last_valid_index()]
85
+ key_frame_series = key_frame_series.interpolate(method=interp_method.lower(), limit_direction='both')
86
+ if integer:
87
+ return key_frame_series.astype(int)
88
+ return key_frame_series
89
+
90
+ def parse_key_frames(string, prompt_parser=None):
91
+ # because math functions (i.e. sin(t)) can utilize brackets
92
+ # it extracts the value in form of some stuff
93
+ # which has previously been enclosed with brackets and
94
+ # with a comma or end of line existing after the closing one
95
+ pattern = r'((?P<frame>[0-9]+):[\s]*\((?P<param>[\S\s]*?)\)([,][\s]?|[\s]?$))'
96
+ frames = dict()
97
+ for match_object in re.finditer(pattern, string):
98
+ frame = int(match_object.groupdict()['frame'])
99
+ param = match_object.groupdict()['param']
100
+ if prompt_parser:
101
+ frames[frame] = prompt_parser(param)
102
+ else:
103
+ frames[frame] = param
104
+ if frames == {} and len(string) != 0:
105
+ raise RuntimeError('Key Frame string not correctly formatted')
106
+ return frames
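A usage sketch for the two helpers above: parse a keyframe string into a {frame: expression} dict, then expand it into one value per frame (assumes numpy, pandas and numexpr are installed and the functions are in scope):

schedule = "0:(0), 5:(10*sin(2*3.14*t/10))"
frames = parse_key_frames(schedule)
print(frames)                                  # {0: '0', 5: '10*sin(2*3.14*t/10)'}
series = get_inbetweens(frames, max_frames=10)
print(series.tolist())                         # one interpolated float per frame; expressions are evaluated with t = frame index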
extensions/deforum/scripts/deforum_helpers/args.py ADDED
@@ -0,0 +1,1214 @@
1
+ from modules.shared import cmd_opts
2
+ from modules.processing import get_fixed_seed
3
+ from modules.ui_components import FormRow
4
+ import modules.shared as sh
5
+ import modules.paths as ph
6
+ import os
7
+ from .frame_interpolation import set_interp_out_fps, gradio_f_interp_get_fps_and_fcount, process_interp_vid_upload_logic
8
+ from .upscaling import process_upscale_vid_upload_logic, process_ncnn_upscale_vid_upload_logic
9
+ from .video_audio_utilities import find_ffmpeg_binary, ffmpeg_stitch_video, direct_stitch_vid_from_frames, get_quick_vid_info, extract_number
10
+ from .gradio_funcs import *
11
+ from .general_utils import get_os
12
+ from .deforum_controlnet import controlnet_component_names, setup_controlnet_ui
13
+ import tempfile
14
+
15
+ def Root():
16
+ device = sh.device
17
+ models_path = ph.models_path + '/Deforum'
18
+ half_precision = not cmd_opts.no_half
19
+ mask_preset_names = ['everywhere','init_mask','video_mask']
20
+ p = None
21
+ frames_cache = []
22
+ initial_seed = None
23
+ initial_info = None
24
+ first_frame = None
25
+ outpath_samples = ""
26
+ animation_prompts = None
27
+ color_corrections = None
28
+ initial_clipskip = None
29
+ current_user_os = get_os()
30
+ tmp_deforum_run_duplicated_folder = os.path.join(tempfile.gettempdir(), 'tmp_run_deforum')
31
+ return locals()
32
+
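Root(), and the other settings factories below, all use the same "return locals()" trick: every local name becomes a key in a plain dict of defaults, which setup_deforum_setting_dictionary later wraps in a SimpleNamespace. A tiny self-contained illustration of the pattern (generic names, not the extension's own fields):

from types import SimpleNamespace

def ExampleArgs():
    width = 512                      # every local becomes a settings key
    height = 512
    return locals()

args = SimpleNamespace(**ExampleArgs())
print(args.width, args.height)       # 512 512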
33
+ def DeforumAnimArgs():
34
+
35
+ #@markdown ####**Animation:**
36
+ animation_mode = '2D' #@param ['None', '2D', '3D', 'Video Input', 'Interpolation'] {type:'string'}
37
+ max_frames = 120 #@param {type:"number"}
38
+ border = 'replicate' #@param ['wrap', 'replicate'] {type:'string'}
39
+ #@markdown ####**Motion Parameters:**
40
+ angle = "0:(0)"#@param {type:"string"}
41
+ zoom = "0:(1.0025+0.002*sin(1.25*3.14*t/30))"#@param {type:"string"}
42
+ translation_x = "0:(0)"#@param {type:"string"}
43
+ translation_y = "0:(0)"#@param {type:"string"}
44
+ translation_z = "0:(1.75)"#@param {type:"string"}
45
+ rotation_3d_x = "0:(0)"#@param {type:"string"}
46
+ rotation_3d_y = "0:(0)"#@param {type:"string"}
47
+ rotation_3d_z = "0:(0)"#@param {type:"string"}
48
+ enable_perspective_flip = False #@param {type:"boolean"}
49
+ perspective_flip_theta = "0:(0)"#@param {type:"string"}
50
+ perspective_flip_phi = "0:(0)"#@param {type:"string"}
51
+ perspective_flip_gamma = "0:(0)"#@param {type:"string"}
52
+ perspective_flip_fv = "0:(53)"#@param {type:"string"}
53
+ noise_schedule = "0: (0.065)"#@param {type:"string"}
54
+ strength_schedule = "0: (0.65)"#@param {type:"string"}
55
+ contrast_schedule = "0: (1.0)"#@param {type:"string"}
56
+ cfg_scale_schedule = "0: (7)"
57
+ enable_steps_scheduling = False#@param {type:"boolean"}
58
+ steps_schedule = "0: (25)"#@param {type:"string"}
59
+ fov_schedule = "0: (70)"
60
+ near_schedule = "0: (200)"
61
+ far_schedule = "0: (10000)"
62
+ seed_schedule = "0:(5), 1:(-1), 219:(-1), 220:(5)"
63
+ pix2pix_img_cfg_scale = "1.5"
64
+ pix2pix_img_cfg_scale_schedule = "0:(1.5)"
65
+ enable_subseed_scheduling = False
66
+ subseed_schedule = "0:(1)"
67
+ subseed_strength_schedule = "0:(0)"
68
+
69
+ # Sampler Scheduling
70
+ enable_sampler_scheduling = False #@param {type:"boolean"}
71
+ sampler_schedule = '0: ("Euler a")'
72
+
73
+ # Composable mask scheduling
74
+ use_noise_mask = False
75
+ mask_schedule = '0: ("!({everywhere}^({init_mask}|{video_mask}) ) ")'
76
+ noise_mask_schedule = '0: ("!({everywhere}^({init_mask}|{video_mask}) ) ")'
77
+ # Checkpoint Scheduling
78
+ enable_checkpoint_scheduling = False#@param {type:"boolean"}
79
+ checkpoint_schedule = '0: ("model1.ckpt"), 100: ("model2.ckpt")'
80
+
81
+ # CLIP skip Scheduling
82
+ enable_clipskip_scheduling = False #@param {type:"boolean"}
83
+ clipskip_schedule = '0: (2)'
84
+
85
+ # Anti-blur
86
+ kernel_schedule = "0: (5)"
87
+ sigma_schedule = "0: (1.0)"
88
+ amount_schedule = "0: (0.35)"
89
+ threshold_schedule = "0: (0.0)"
90
+ # Hybrid video
91
+ hybrid_comp_alpha_schedule = "0:(1)" #@param {type:"string"}
92
+ hybrid_comp_mask_blend_alpha_schedule = "0:(0.5)" #@param {type:"string"}
93
+ hybrid_comp_mask_contrast_schedule = "0:(1)" #@param {type:"string"}
94
+ hybrid_comp_mask_auto_contrast_cutoff_high_schedule = "0:(100)" #@param {type:"string"}
95
+ hybrid_comp_mask_auto_contrast_cutoff_low_schedule = "0:(0)" #@param {type:"string"}
96
+
97
+ #@markdown ####**Coherence:**
98
+ color_coherence = 'Match Frame 0 LAB' #@param ['None', 'Match Frame 0 HSV', 'Match Frame 0 LAB', 'Match Frame 0 RGB', 'Video Input'] {type:'string'}
99
+ color_coherence_video_every_N_frames = 1 #@param {type:"integer"}
100
+ color_force_grayscale = False #@param {type:"boolean"}
101
+ diffusion_cadence = '2' #@param ['1','2','3','4','5','6','7','8'] {type:'string'}
102
+
103
+ #@markdown ####**Noise settings:**
104
+ noise_type = 'perlin' #@param ['uniform', 'perlin'] {type:'string'}
105
+ # Perlin params
106
+ perlin_w = 8 #@param {type:"number"}
107
+ perlin_h = 8 #@param {type:"number"}
108
+ perlin_octaves = 4 #@param {type:"number"}
109
+ perlin_persistence = 0.5 #@param {type:"number"}
110
+
111
+ #@markdown ####**3D Depth Warping:**
112
+ use_depth_warping = True #@param {type:"boolean"}
113
+ midas_weight = 0.2 #@param {type:"number"}
114
+
115
+ padding_mode = 'border'#@param ['border', 'reflection', 'zeros'] {type:'string'}
116
+ sampling_mode = 'bicubic'#@param ['bicubic', 'bilinear', 'nearest'] {type:'string'}
117
+ save_depth_maps = False #@param {type:"boolean"}
118
+
119
+ #@markdown ####**Video Input:**
120
+ video_init_path ='https://github.com/hithereai/d/releases/download/m/vid.mp4' #@param {type:"string"}
121
+ extract_nth_frame = 1#@param {type:"number"}
122
+ extract_from_frame = 0 #@param {type:"number"}
123
+ extract_to_frame = -1 #@param {type:"number"} use -1 for no frame limit
124
+ overwrite_extracted_frames = True #@param {type:"boolean"}
125
+ use_mask_video = False #@param {type:"boolean"}
126
+ video_mask_path ='/content/video_in.mp4'#@param {type:"string"}
127
+
128
+ #@markdown ####**Hybrid Video for 2D/3D Animation Mode:**
129
+ hybrid_generate_inputframes = False #@param {type:"boolean"}
130
+ hybrid_generate_human_masks = "None" #@param ['None','PNGs','Video', 'Both']
131
+ hybrid_use_first_frame_as_init_image = True #@param {type:"boolean"}
132
+ hybrid_motion = "None" #@param ['None','Optical Flow','Perspective','Affine']
133
+ hybrid_motion_use_prev_img = False #@param {type:"boolean"}
134
+ hybrid_flow_method = "Farneback" #@param ['DIS Medium','Farneback']
135
+ hybrid_composite = False #@param {type:"boolean"}
136
+ hybrid_comp_mask_type = "None" #@param ['None', 'Depth', 'Video Depth', 'Blend', 'Difference']
137
+ hybrid_comp_mask_inverse = False #@param {type:"boolean"}
138
+ hybrid_comp_mask_equalize = "None" #@param ['None','Before','After','Both']
139
+ hybrid_comp_mask_auto_contrast = False #@param {type:"boolean"}
140
+ hybrid_comp_save_extra_frames = False #@param {type:"boolean"}
141
+
142
+ #@markdown ####**Resume Animation:**
143
+ resume_from_timestring = False #@param {type:"boolean"}
144
+ resume_timestring = "20220829210106" #@param {type:"string"}
145
+
146
+ return locals()
147
+
148
+ # def DeforumPrompts():
149
+ # return
150
+
151
+ def DeforumAnimPrompts():
152
+ return r"""{
153
+ "0": "tiny cute swamp bunny, highly detailed, intricate, ultra hd, sharp photo, crepuscular rays, in focus, by tomasz alen kopera",
154
+ "30": "anthropomorphic clean cat, surrounded by fractals, epic angle and pose, symmetrical, 3d, depth of field, ruan jia and fenghua zhong",
155
+ "60": "a beautiful coconut --neg photo, realistic",
156
+ "90": "a beautiful durian, trending on Artstation"
157
+ }
158
+ """
159
+
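The default animation prompts above are a JSON object mapping frame numbers to prompt strings, with an optional "--neg" separator for the negative part (as the Prompts-tab notes further down explain). A small parsing sketch, assuming the function above is in scope:

import json

prompts = json.loads(DeforumAnimPrompts())
for frame, text in prompts.items():
    positive, _, negative = text.partition("--neg")
    print(int(frame), "|", positive.strip(), "| neg:", negative.strip())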
160
+ def DeforumArgs():
161
+ #@markdown **Image Settings**
162
+ W = 512 #@param
163
+ H = 512 #@param
164
+ W, H = map(lambda x: x - x % 64, (W, H)) # resize to integer multiple of 64
165
+
166
+ #@markdown **Webui stuff**
167
+ tiling = False
168
+ restore_faces = False
169
+ seed_enable_extras = False
170
+ subseed = -1
171
+ subseed_strength = 0
172
+ seed_resize_from_w = 0
173
+ seed_resize_from_h = 0
174
+
175
+ #@markdown **Sampling Settings**
176
+ seed = -1 #@param
177
+ sampler = 'euler_ancestral' #@param ["klms","dpm2","dpm2_ancestral","heun","euler","euler_ancestral","plms", "ddim"]
178
+ steps = 25 #@param
179
+ scale = 7 #@param
180
+ ddim_eta = 0.0 #@param
181
+ dynamic_threshold = None
182
+ static_threshold = None
183
+
184
+ #@markdown **Save & Display Settings**
185
+ save_samples = True #@param {type:"boolean"}
186
+ save_settings = True #@param {type:"boolean"}
187
+ display_samples = True #@param {type:"boolean"}
188
+ save_sample_per_step = False #@param {type:"boolean"}
189
+ show_sample_per_step = False #@param {type:"boolean"}
190
+
191
+ #@markdown **Prompt Settings**
192
+ prompt_weighting = False #@param {type:"boolean"}
193
+ normalize_prompt_weights = True #@param {type:"boolean"}
194
+ log_weighted_subprompts = False #@param {type:"boolean"}
195
+
196
+ #@markdown **Batch Settings**
197
+ n_batch = 1 #@param
198
+ batch_name = "Deforum" #@param {type:"string"}
199
+ filename_format = "{timestring}_{index}_{prompt}.png" #@param ["{timestring}_{index}_{seed}.png","{timestring}_{index}_{prompt}.png"]
200
+ seed_behavior = "iter" #@param ["iter","fixed","random","ladder","alternate","schedule"]
201
+ seed_iter_N = 1 #@param {type:'integer'}
202
+ # make_grid = False #@param {type:"boolean"}
203
+ # grid_rows = 2 #@param
204
+ outdir = ""#get_output_folder(output_path, batch_name)
205
+
206
+ #@markdown **Init Settings**
207
+ use_init = False #@param {type:"boolean"}
208
+ strength = 0.0 #@param {type:"number"}
209
+ strength_0_no_init = True # Set the strength to 0 automatically when no init image is used
210
+ init_image = "https://github.com/hithereai/d/releases/download/m/kaba.png" #@param {type:"string"}
211
+ # Whiter areas of the mask are areas that change more
212
+ use_mask = False #@param {type:"boolean"}
213
+ use_alpha_as_mask = False # use the alpha channel of the init image as the mask
214
+ mask_file = "https://github.com/hithereai/d/releases/download/m/mask.jpg" #@param {type:"string"}
215
+ invert_mask = False #@param {type:"boolean"}
216
+ # Adjust mask image, 1.0 is no adjustment. Should be positive numbers.
217
+ mask_contrast_adjust = 1.0 #@param {type:"number"}
218
+ mask_brightness_adjust = 1.0 #@param {type:"number"}
219
+ # Overlay the masked image at the end of the generation so it does not get degraded by encoding and decoding
220
+ overlay_mask = True # {type:"boolean"}
221
+ # Blur edges of final overlay mask, if used. Minimum = 0 (no blur)
222
+ mask_overlay_blur = 4 # {type:"number"}
223
+
224
+ fill = 1 #MASKARGSEXPANSION Todo : Rename and convert to same formatting as used in img2img masked content
225
+ full_res_mask = True
226
+ full_res_mask_padding = 4
227
+ reroll_blank_frames = 'reroll' # reroll, interrupt, or ignore
228
+
229
+ n_samples = 1 # doesnt do anything
230
+ precision = 'autocast'
231
+ C = 4
232
+ f = 8
233
+
234
+ prompt = ""
235
+ timestring = ""
236
+ init_latent = None
237
+ init_sample = None
238
+ init_c = None
239
+ mask_image = None
240
+ noise_mask = None
241
+ seed_internal = 0
242
+
243
+ return locals()
244
+
245
+ def keyframeExamples():
246
+ return '''{
247
+ "0": "https://user-images.githubusercontent.com/121192995/215279228-1673df8a-f919-4380-b04c-19379b2041ff.png",
248
+ "50": "https://user-images.githubusercontent.com/121192995/215279281-7989fd6f-4b9b-4d90-9887-b7960edd59f8.png",
249
+ "100": "https://user-images.githubusercontent.com/121192995/215279284-afc14543-d220-4142-bbf4-503776ca2b8b.png",
250
+ "150": "https://user-images.githubusercontent.com/121192995/215279286-23378635-85b3-4457-b248-23e62c048049.jpg",
251
+ "200": "https://user-images.githubusercontent.com/121192995/215279228-1673df8a-f919-4380-b04c-19379b2041ff.png"
252
+ }'''
253
+
254
+ def LoopArgs():
255
+ use_looper = False
256
+ init_images = keyframeExamples()
257
+ image_strength_schedule = "0:(0.75)"
258
+ blendFactorMax = "0:(0.35)"
259
+ blendFactorSlope = "0:(0.25)"
260
+ tweening_frames_schedule = "0:(20)"
261
+ color_correction_factor = "0:(0.075)"
262
+ return locals()
263
+
264
+ def ParseqArgs():
265
+ parseq_manifest = None
266
+ parseq_use_deltas = True
267
+ return locals()
268
+
269
+ def DeforumOutputArgs():
270
+ skip_video_for_run_all = False #@param {type: 'boolean'}
271
+ fps = 15 #@param {type:"number"}
272
+ make_gif = False
273
+ image_path = "C:/SD/20230124234916_%05d.png" #@param {type:"string"}
274
+ mp4_path = "testvidmanualsettings.mp4" #@param {type:"string"}
275
+ ffmpeg_location = find_ffmpeg_binary()
276
+ ffmpeg_crf = '17'
277
+ ffmpeg_preset = 'slow'
278
+ add_soundtrack = 'None' #@param ["File","Init Video"]
279
+ soundtrack_path = "https://freetestdata.com/wp-content/uploads/2021/09/Free_Test_Data_1MB_MP3.mp3"
280
+ # End-Run upscaling
281
+ r_upscale_video = False
282
+ r_upscale_factor = 'x2' # ['x2', 'x3', 'x4']
283
+ # **model below** - 'realesr-animevideov3' (default of realesrgan engine, does 2-4x), the rest do only 4x: 'realesrgan-x4plus', 'realesrgan-x4plus-anime'
284
+ r_upscale_model = 'realesr-animevideov3'
285
+ r_upscale_keep_imgs = True
286
+
287
+ render_steps = False #@param {type: 'boolean'}
288
+ path_name_modifier = "x0_pred" #@param ["x0_pred","x"]
289
+ # max_video_frames = 200 #@param {type:"string"}
290
+ store_frames_in_ram = False #@param {type: 'boolean'}
291
+ #@markdown **Interpolate Video Settings**
292
+ # todo: change them to support FILM interpolation as well
293
+ frame_interpolation_engine = "None" #@param ["None", "RIFE v4.6", "FILM"]
294
+ frame_interpolation_x_amount = 2 # [2 to 1000 depends on the engine]
295
+ frame_interpolation_slow_mo_enabled = False
296
+ frame_interpolation_slow_mo_amount = 2 #@param [2 to 10]
297
+ frame_interpolation_keep_imgs = False #@param {type: 'boolean'}
298
+ return locals()
299
+
300
+ import gradio as gr
301
+ import os
302
+ import time
303
+ from types import SimpleNamespace
304
+
305
+ i1_store_backup = "<p style=\"text-align:center;font-weight:bold;margin-bottom:0em\">Deforum extension for auto1111 — version 2.2b</p>"
306
+ i1_store = i1_store_backup
307
+
308
+ mask_fill_choices=['fill', 'original', 'latent noise', 'latent nothing']
309
+
310
+ def setup_deforum_setting_dictionary(self, is_img2img, is_extension = True):
311
+ d = SimpleNamespace(**DeforumArgs()) #default args
312
+ da = SimpleNamespace(**DeforumAnimArgs()) #default anim args
313
+ dp = SimpleNamespace(**ParseqArgs()) #default parseq args
314
+ dv = SimpleNamespace(**DeforumOutputArgs()) #default video args
315
+ dr = SimpleNamespace(**Root()) # ROOT args
316
+ dloopArgs = SimpleNamespace(**LoopArgs())
317
+ if not is_extension:
318
+ with gr.Row():
319
+ btn = gr.Button("Click here after the generation to show the video")
320
+ with gr.Row():
321
+ i1 = gr.HTML(i1_store, elem_id='deforum_header')
322
+ else:
323
+ btn = i1 = gr.HTML("")
324
+
325
+ # MAIN (TOP) EXTENSION INFO ACCORD
326
+ with gr.Accordion("Info, Links and Help", open=False, elem_id='main_top_info_accord'):
327
+ gr.HTML("""<strong>Made by <a href="https://deforum.github.io">deforum.github.io</a>, port for AUTOMATIC1111's webui maintained by <a href="https://github.com/kabachuha">kabachuha</a></strong>""")
328
+ gr.HTML("""<a style="color:SteelBlue" href="https://github.com/deforum-art/deforum-for-automatic1111-webui/wiki/FAQ-&-Troubleshooting">FOR HELP CLICK HERE</a>""", elem_id="for_help_click_here")
329
+ gr.HTML("""<ul style="list-style-type:circle; margin-left:1em">
330
+ <li>The code for this extension: <a style="color:SteelBlue" href="https://github.com/deforum-art/deforum-for-automatic1111-webui">here</a>.</li>
331
+ <li>Join the <a style="color:SteelBlue" href="https://discord.gg/deforum">official Deforum Discord</a> to share your creations and suggestions.</li>
332
+ <li>Official Deforum Wiki: <a style="color:SteelBlue" href="https://github.com/deforum-art/deforum-for-automatic1111-webui/wiki">here</a>.</li>
333
+ <li>Anime-inclined great guide (by FizzleDorf) with lots of examples: <a style="color:SteelBlue" href="https://rentry.org/AnimAnon-Deforum">here</a>.</li>
334
+ <li>For advanced keyframing with Math functions, see <a style="color:SteelBlue" href="https://github.com/deforum-art/deforum-for-automatic1111-webui/wiki/Maths-in-Deforum">here</a>.</li>
335
+ <li>Alternatively, use <a style="color:SteelBlue" href="https://sd-parseq.web.app/deforum">sd-parseq</a> as a UI to define your animation schedules (see the Parseq section in the Keyframes tab).</li>
336
+ <li><a style="color:SteelBlue" href="https://www.framesync.xyz/">framesync.xyz</a> is also a good option, it makes compact math formulae for Deforum keyframes by selecting various waveforms.</li>
337
+ <li>The other site allows for making keyframes using <a style="color:SteelBlue" href="https://www.chigozie.co.uk/keyframe-string-generator/">interactive splines and Bezier curves</a> (select Disco output format).</li>
338
+ <li>If you want to use Width/Height which are not multiples of 64, please change noise_type to 'Uniform', in Keyframes --> Noise.</li>
339
+ </ul>
340
+ <italic>If you liked this extension, please <a style="color:SteelBlue" href="https://github.com/deforum-art/deforum-for-automatic1111-webui">give it a star on GitHub</a>!</italic> 😊""")
341
+ if not is_extension:
342
+ def show_vid():
343
+ return {
344
+ i1: gr.update(value=i1_store, visible=True)
345
+ }
346
+
347
+ btn.click(
348
+ show_vid,
349
+ [],
350
+ [i1]
351
+ )
352
+
353
+ with gr.Blocks():
354
+ # RUN TAB
355
+ with gr.Tab('Run'):
356
+ from modules.sd_samplers import samplers_for_img2img
357
+ with gr.Row(variant='compact'):
358
+ sampler = gr.Dropdown(label="Sampler", choices=[x.name for x in samplers_for_img2img], value=samplers_for_img2img[0].name, type="value", elem_id="sampler", interactive=True)
359
+ steps = gr.Slider(label="Steps", minimum=0, maximum=200, step=1, value=d.steps, interactive=True)
360
+ with gr.Row(variant='compact'):
361
+ W = gr.Slider(label="Width", minimum=64, maximum=2048, step=64, value=d.W, interactive=True)
362
+ H = gr.Slider(label="Height", minimum=64, maximum=2048, step=64, value=d.H, interactive=True)
363
+ with gr.Row(variant='compact'):
364
+ seed = gr.Number(label="Seed", value=d.seed, interactive=True, precision=0)
365
+ batch_name = gr.Textbox(label="Batch name", lines=1, interactive=True, value = d.batch_name)
366
+ with gr.Accordion('Restore Faces, Tiling & more', open=False) as run_more_settings_accord:
367
+ with gr.Row(variant='compact'):
368
+ restore_faces = gr.Checkbox(label='Restore Faces', value=d.restore_faces)
369
+ tiling = gr.Checkbox(label='Tiling', value=False)
370
+ ddim_eta = gr.Number(label="DDIM Eta", value=d.ddim_eta, interactive=True)
371
+ with gr.Row() as pix2pix_img_cfg_scale_row:
372
+ pix2pix_img_cfg_scale_schedule = gr.Textbox(label="Pix2Pix img CFG schedule", value=da.pix2pix_img_cfg_scale_schedule, interactive=True)
373
+ # RUN FROM SETTING FILE ACCORD
374
+ with gr.Accordion('Resume & Run from file', open=False):
375
+ with gr.Tab('Run from Settings file'):
376
+ with gr.Row(variant='compact'):
377
+ override_settings_with_file = gr.Checkbox(label="Override settings", value=False, interactive=True, elem_id='override_settings')
378
+ custom_settings_file = gr.Textbox(label="Custom settings file", lines=1, interactive=True, elem_id='custom_settings_file')
379
+ # RESUME ANIMATION ACCORD
380
+ with gr.Tab('Resume Animation'):
381
+ with gr.Row(variant='compact'):
382
+ resume_from_timestring = gr.Checkbox(label="Resume from timestring", value=da.resume_from_timestring, interactive=True)
383
+ resume_timestring = gr.Textbox(label="Resume timestring", lines=1, value = da.resume_timestring, interactive=True)
384
+ # KEYFRAMES TAB
385
+ with gr.Tab('Keyframes'): #TODO make a some sort of the original dictionary parsing
386
+ with gr.Row(variant='compact'):
387
+ with gr.Column(scale=2):
388
+ animation_mode = gr.Radio(['2D', '3D', 'Interpolation', 'Video Input'], label="Animation mode", value=da.animation_mode, elem_id="animation_mode")
389
+ with gr.Column(scale=1, min_width=180):
390
+ border = gr.Radio(['replicate', 'wrap'], label="Border", value=da.border, elem_id="border")
391
+ with gr.Row(variant='compact'):
392
+ diffusion_cadence = gr.Slider(label="Cadence", minimum=1, maximum=50, step=1, value=da.diffusion_cadence, interactive=True)
393
+ max_frames = gr.Number(label="Max frames", lines=1, value = da.max_frames, interactive=True, precision=0)
394
+ # GUIDED IMAGES ACCORD
395
+ with gr.Accordion('Guided Images', open=False, elem_id='guided_images_accord') as guided_images_accord:
396
+ # GUIDED IMAGES INFO ACCORD
397
+ with gr.Accordion('*READ ME before you use this mode!*', open=False):
398
+ gr.HTML("""You can use this as a guided image tool or as a looper depending on your settings in the keyframe images field.
399
+ Set the keyframes and the images that you want to show up.
400
+ Note: the number of frames between each keyframe should be greater than the tweening frames.""")
401
+ # In later versions this should also be in the strength schedule, but for now you need to set it.
402
+ gr.HTML("""Prerequisites and Important Info:
403
+ <ul style="list-style-type:circle; margin-left:2em; margin-bottom:0em">
404
+ <li>This mode works ONLY with 2D/3D animation modes. Interpolation and Video Input modes aren't supported.</li>
405
+ <li>Set Init tab's strength slider greater than 0. Recommended value (.65 - .80).</li>
406
+ <li>Set 'seed_behavior' to 'schedule' under the Seed Scheduling section below.</li>
407
+ </ul>
408
+ """)
409
+ gr.HTML("""Looping recommendations:
410
+ <ul style="list-style-type:circle; margin-left:2em; margin-bottom:0em">
411
+ <li>seed_schedule should start and end on the same seed. <br />
412
+ Example: seed_schedule could use 0:(5), 1:(-1), 219:(-1), 220:(5)</li>
413
+ <li>The 1st and last keyframe images should match.</li>
414
+ <li>Set your total number of keyframes to be 21 more than the last inserted keyframe image. <br />
415
+ Example: Default args should use 221 as total keyframes.</li>
416
+ <li>Prompts are stored in JSON format. If you've got an error, check it in a JSON validator, <a style="color:SteelBlue" href="https://odu.github.io/slingjsonlint/">like here</a></li>
417
+ </ul>
418
+ """)
419
+ with gr.Row():
420
+ use_looper = gr.Checkbox(label="Enable guided images mode", value=dloopArgs.use_looper, interactive=True)
421
+ with gr.Row():
422
+ init_images = gr.Textbox(label="Images to use for keyframe guidance", lines=9, value = keyframeExamples(), interactive=True)
423
+ # GUIDED IMAGES SCHEDULES ACCORD
424
+ with gr.Accordion('Guided images schedules', open=False):
425
+ with gr.Row():
426
+ image_strength_schedule = gr.Textbox(label="Image strength schedule", lines=1, value = dloopArgs.image_strength_schedule, interactive=True)
427
+ with gr.Row():
428
+ blendFactorMax = gr.Textbox(label="Blend factor max", lines=1, value = dloopArgs.blendFactorMax, interactive=True)
429
+ with gr.Row():
430
+ blendFactorSlope = gr.Textbox(label="Blend factor slope", lines=1, value = dloopArgs.blendFactorSlope, interactive=True)
431
+ with gr.Row():
432
+ tweening_frames_schedule = gr.Textbox(label="Tweening frames schedule", lines=1, value = dloopArgs.tweening_frames_schedule, interactive=True)
433
+ with gr.Row():
434
+ color_correction_factor = gr.Textbox(label="Color correction factor", lines=1, value = dloopArgs.color_correction_factor, interactive=True)
435
+ # EXTA SCHEDULES TABS
436
+ with gr.Tabs(elem_id='extra_schedules'):
437
+ with gr.TabItem('Strength'):
438
+ strength_schedule = gr.Textbox(label="Strength schedule", lines=1, value = da.strength_schedule, interactive=True)
439
+ with gr.TabItem('CFG'):
440
+ cfg_scale_schedule = gr.Textbox(label="CFG scale schedule", lines=1, value = da.cfg_scale_schedule, interactive=True)
441
+ with gr.TabItem('Seed') as a3:
442
+ with gr.Row():
443
+ seed_behavior = gr.Radio(['iter', 'fixed', 'random', 'ladder', 'alternate', 'schedule'], label="Seed behavior", value=d.seed_behavior, elem_id="seed_behavior")
444
+ with gr.Row() as seed_iter_N_row:
445
+ seed_iter_N = gr.Number(label="Seed iter N", value=d.seed_iter_N, interactive=True, precision=0)
446
+ with gr.Row(visible=False) as seed_schedule_row:
447
+ seed_schedule = gr.Textbox(label="Seed schedule", lines=1, value = da.seed_schedule, interactive=True)
448
+ with gr.TabItem('SubSeed', open=False) as subseed_sch_tab:
449
+ enable_subseed_scheduling = gr.Checkbox(label="Enable Subseed scheduling", value=da.enable_subseed_scheduling, interactive=True)
450
+ subseed_schedule = gr.Textbox(label="Subseed schedule", lines=1, value = da.subseed_schedule, interactive=True)
451
+ subseed_strength_schedule = gr.Textbox(label="Subseed strength schedule", lines=1, value = da.subseed_strength_schedule, interactive=True)
452
+ with gr.Row(variant='compact'):
453
+ seed_resize_from_w = gr.Slider(minimum=0, maximum=2048, step=64, label="Resize seed from width", value=0)
454
+ seed_resize_from_h = gr.Slider(minimum=0, maximum=2048, step=64, label="Resize seed from height", value=0)
455
+ # Steps Scheduling
456
+ with gr.TabItem('Step') as a13:
457
+ with gr.Row():
458
+ enable_steps_scheduling = gr.Checkbox(label="Enable steps scheduling", value=da.enable_steps_scheduling, interactive=True)
459
+ with gr.Row():
460
+ steps_schedule = gr.Textbox(label="Steps schedule", lines=1, value = da.steps_schedule, interactive=True)
461
+ # Sampler Scheduling
462
+ with gr.TabItem('Sampler') as a14:
463
+ with gr.Row():
464
+ enable_sampler_scheduling = gr.Checkbox(label="Enable sampler scheduling", value=da.enable_sampler_scheduling, interactive=True)
465
+ with gr.Row():
466
+ sampler_schedule = gr.Textbox(label="Sampler schedule", lines=1, value = da.sampler_schedule, interactive=True)
467
+ # Checkpoint Scheduling
468
+ with gr.TabItem('Checkpoint') as a15:
469
+ with gr.Row():
470
+ enable_checkpoint_scheduling = gr.Checkbox(label="Enable checkpoint scheduling", value=da.enable_checkpoint_scheduling, interactive=True)
471
+ with gr.Row():
472
+ checkpoint_schedule = gr.Textbox(label="Checkpoint schedule", lines=1, value = da.checkpoint_schedule, interactive=True)
473
+ with gr.TabItem('CLIP Skip', open=False) as a16:
474
+ with gr.Row():
475
+ enable_clipskip_scheduling = gr.Checkbox(label="Enable CLIP skip scheduling", value=da.enable_clipskip_scheduling, interactive=True)
476
+ with gr.Row():
477
+ clipskip_schedule = gr.Textbox(label="CLIP skip schedule", lines=1, value = da.clipskip_schedule, interactive=True)
478
+ # MOTION INNER TAB
479
+ with gr.Tab('Motion') as motion_tab:
480
+ with gr.Column(visible=True) as only_2d_motion_column:
481
+ with gr.Row(variant='compact'):
482
+ angle = gr.Textbox(label="Angle", lines=1, value = da.angle, interactive=True)
483
+ with gr.Row(variant='compact'):
484
+ zoom = gr.Textbox(label="Zoom", lines=1, value = da.zoom, interactive=True)
485
+ with gr.Column(visible=True) as both_anim_mode_motion_params_column:
486
+ with gr.Row(variant='compact'):
487
+ translation_x = gr.Textbox(label="Translation X", lines=1, value = da.translation_x, interactive=True)
488
+ with gr.Row(variant='compact'):
489
+ translation_y = gr.Textbox(label="Translation Y", lines=1, value = da.translation_y, interactive=True)
490
+ with gr.Column(visible=False) as only_3d_motion_column:
491
+ with gr.Row(variant='compact'):
492
+ translation_z = gr.Textbox(label="Translation Z", lines=1, value = da.translation_z, interactive=True)
493
+ with gr.Row(variant='compact'):
494
+ rotation_3d_x = gr.Textbox(label="Rotation 3D X", lines=1, value = da.rotation_3d_x, interactive=True)
495
+ with gr.Row(variant='compact'):
496
+ rotation_3d_y = gr.Textbox(label="Rotation 3D Y", lines=1, value = da.rotation_3d_y, interactive=True)
497
+ with gr.Row(variant='compact'):
498
+ rotation_3d_z = gr.Textbox(label="Rotation 3D Z", lines=1, value = da.rotation_3d_z, interactive=True)
499
+ # 3D DEPTH & FOV ACCORD
500
+ with gr.Accordion('Depth Warping & FOV', visible=False, open=False) as depth_3d_warping_accord:
501
+ with gr.Tab('Depth Warping'):
502
+ with gr.Row(variant='compact'):
503
+ use_depth_warping = gr.Checkbox(label="Use depth warping", value=da.use_depth_warping, interactive=True)
504
+ midas_weight = gr.Number(label="MiDaS weight", value=da.midas_weight, interactive=True)
505
+ with gr.Row(variant='compact'):
506
+ padding_mode = gr.Radio(['border', 'reflection', 'zeros'], label="Padding mode", value=da.padding_mode, elem_id="padding_mode")
507
+ sampling_mode = gr.Radio(['bicubic', 'bilinear', 'nearest'], label="Sampling mode", value=da.sampling_mode, elem_id="sampling_mode")
508
+ with gr.Tab('Field Of View', visible=False, open=False) as fov_accord:
509
+ with gr.Row(variant='compact'):
510
+ fov_schedule = gr.Textbox(label="FOV schedule", lines=1, value = da.fov_schedule, interactive=True)
511
+ with gr.Row():
512
+ near_schedule = gr.Textbox(label="Near schedule", lines=1, value = da.near_schedule, interactive=True)
513
+ with gr.Row():
514
+ far_schedule = gr.Textbox(label="Far schedule", lines=1, value = da.far_schedule, interactive=True)
515
+ # PERSPECTIVE FLIP ACCORD
516
+ with gr.Accordion('Perspective Flip', open=False) as perspective_flip_accord:
517
+ with gr.Row():
518
+ enable_perspective_flip = gr.Checkbox(label="Enable perspective flip", value=da.enable_perspective_flip, interactive=True)
519
+ with gr.Row():
520
+ perspective_flip_theta = gr.Textbox(label="Perspective flip theta", lines=1, value = da.perspective_flip_theta, interactive=True)
521
+ with gr.Row():
522
+ perspective_flip_phi = gr.Textbox(label="Perspective flip phi", lines=1, value = da.perspective_flip_phi, interactive=True)
523
+ with gr.Row():
524
+ perspective_flip_gamma = gr.Textbox(label="Perspective flip gamma", lines=1, value = da.perspective_flip_gamma, interactive=True)
525
+ with gr.Row():
526
+ perspective_flip_fv = gr.Textbox(label="Perspective flip fv", lines=1, value = da.perspective_flip_fv, interactive=True)
527
+ # NOISE INNER TAB
528
+ with gr.Tab('Noise', open=True) as a8:
529
+ with gr.Row():
530
+ noise_type = gr.Radio(['uniform', 'perlin'], label="Noise type", value=da.noise_type, elem_id="noise_type")
531
+ with gr.Row():
532
+ noise_schedule = gr.Textbox(label="Noise schedule", lines=1, value = da.noise_schedule, interactive=True)
533
+ with gr.Row() as perlin_row:
534
+ with gr.Column(min_width=220):
535
+ perlin_octaves = gr.Slider(label="Perlin octaves", minimum=1, maximum=7, value=da.perlin_octaves, step=1, interactive=True)
536
+ with gr.Column(min_width=220):
537
+ perlin_persistence = gr.Slider(label="Perlin persistence", minimum=0, maximum=1, value=da.perlin_persistence, step=0.02, interactive=True)
538
+ # COHERENCE INNER TAB
539
+ with gr.Tab('Coherence', open=False) as coherence_accord:
540
+ with gr.Row(equal_height=True):
541
+ # Future TODO: remove 'match frame 0' prefix (after we manage the deprecated-names settings import), then convert from Dropdown to Radio!
542
+ color_coherence = gr.Dropdown(label="Color coherence", choices=['None', 'Match Frame 0 HSV', 'Match Frame 0 LAB', 'Match Frame 0 RGB', 'Video Input'], value=da.color_coherence, type="value", elem_id="color_coherence", interactive=True)
543
+ with gr.Column() as force_grayscale_column:
544
+ color_force_grayscale = gr.Checkbox(label="Color force Grayscale", value=da.color_force_grayscale, interactive=True)
545
+ with gr.Row(visible=False) as color_coherence_video_every_N_frames_row:
546
+ color_coherence_video_every_N_frames = gr.Number(label="Color coherence video every N frames", value=1, interactive=True)
547
+ with gr.Row():
548
+ contrast_schedule = gr.Textbox(label="Contrast schedule", lines=1, value = da.contrast_schedule, interactive=True)
549
+ with gr.Row():
550
+ # what to do with blank frames (they may result from glitches or the NSFW filter being turned on): reroll with +1 seed, interrupt the animation generation, or do nothing
551
+ reroll_blank_frames = gr.Radio(['reroll', 'interrupt', 'ignore'], label="Reroll blank frames", value=d.reroll_blank_frames, elem_id="reroll_blank_frames")
552
+ # ANTI BLUR INNER TAB
553
+ with gr.Tab('Anti Blur', open=False, elem_id='anti_blur_accord') as anti_blur_tab:
554
+ with gr.Row(variant='compact'):
555
+ kernel_schedule = gr.Textbox(label="Kernel schedule", lines=1, value = da.kernel_schedule, interactive=True)
556
+ with gr.Row(variant='compact'):
557
+ sigma_schedule = gr.Textbox(label="Sigma schedule", lines=1, value = da.sigma_schedule, interactive=True)
558
+ with gr.Row(variant='compact'):
559
+ amount_schedule = gr.Textbox(label="Amount schedule", lines=1, value = da.amount_schedule, interactive=True)
560
+ with gr.Row(variant='compact'):
561
+ threshold_schedule = gr.Textbox(label="Threshold schedule", lines=1, value = da.threshold_schedule, interactive=True)
562
+ # PROMPTS TAB
563
+ with gr.Tab('Prompts'):
564
+ # PROMPTS INFO ACCORD
565
+ with gr.Accordion(label='*Important* notes on Prompts', elem_id='prompts_info_accord', open=False, visible=True) as prompts_info_accord:
566
+ gr.HTML("""
567
+ <ul style="list-style-type:circle; margin-left:0.75em; margin-bottom:0.2em">
568
+ <li>Please always keep values in math functions above 0.</li>
569
+ <li>There is *no* Batch mode like in vanilla deforum. Please Use the txt2img tab for that.</li>
570
+ <li>For negative prompts, please write your positive prompt, then --neg ugly, text, asymmetric, or any other negative tokens of your choice. OR:</li>
571
+ <li>Use the negative_prompts field to automatically append all words as a negative prompt. *Don't* add --neg in the negative_prompts field!</li>
572
+ <li>Prompts are stored in JSON format. If you've got an error, check it in a <a style="color:SteelBlue" href="https://odu.github.io/slingjsonlint/">JSON Validator</a></li>
573
+ </ul>
574
+ """)
575
+ with gr.Row():
576
+ animation_prompts = gr.Textbox(label="Prompts", lines=8, interactive=True, value = DeforumAnimPrompts())
577
+ with gr.Row():
578
+ animation_prompts_positive = gr.Textbox(label="Prompts positive", lines=1, interactive=True, value = "")
579
+ with gr.Row():
580
+ animation_prompts_negative = gr.Textbox(label="Prompts negative", lines=1, interactive=True, value = "")
581
+ # COMPOSABLE MASK SCHEDULING ACCORD
582
+ with gr.Accordion('Composable Mask scheduling', open=False):
583
+ gr.HTML("""
584
+ <ul style="list-style-type:circle; margin-left:0.75em; margin-bottom:0.2em">
585
+ <li>To enable, check use_mask in the Init tab</li>
586
+ <li>Supports boolean operations: (! - negation, & - and, | - or, ^ - xor, \ - difference, () - nested operations)</li>
587
+ <li>default variables: in \{\}, like \{init_mask\}, \{video_mask\}, \{everywhere\}</li>
588
+ <li>masks from files: in [], like [mask1.png]</li>
589
+ <li>description-based: <i>word masks</i> in &lt;&gt;, like &lt;apple&gt;, &lt;hair&gt;</li>
590
+ </ul>
591
+ """)
592
+ with gr.Row():
593
+ mask_schedule = gr.Textbox(label="Mask schedule", lines=1, value = da.mask_schedule, interactive=True)
594
+ with gr.Row():
595
+ use_noise_mask = gr.Checkbox(label="Use noise mask", value=da.use_noise_mask, interactive=True)
596
+ with gr.Row():
597
+ noise_mask_schedule = gr.Textbox(label="Noise mask schedule", lines=1, value = da.noise_mask_schedule, interactive=True)
598
+ # INIT MAIN TAB
599
+ with gr.Tab('Init'):
600
+ # IMAGE INIT INNER-TAB
601
+ with gr.Tab('Image Init'):
602
+ with gr.Row():
603
+ with gr.Column(min_width=150):
604
+ use_init = gr.Checkbox(label="Use init", value=d.use_init, interactive=True, visible=True)
605
+ with gr.Column(min_width=150):
606
+ strength_0_no_init = gr.Checkbox(label="Strength 0 no init", value=True, interactive=True)
607
+ with gr.Column(min_width=170):
608
+ strength = gr.Slider(label="Strength", minimum=0, maximum=1, step=0.01, value=0, interactive=True)
609
+ with gr.Row():
610
+ init_image = gr.Textbox(label="Init image", lines=1, interactive=True, value = d.init_image)
611
+ # VIDEO INIT INNER-TAB
612
+ with gr.Tab('Video Init'):
613
+ with gr.Row():
614
+ video_init_path = gr.Textbox(label="Video init path", lines=1, value = da.video_init_path, interactive=True)
615
+ with gr.Row():
616
+ extract_from_frame = gr.Number(label="Extract from frame", value=da.extract_from_frame, interactive=True, precision=0)
617
+ extract_to_frame = gr.Number(label="Extract to frame", value=da.extract_to_frame, interactive=True, precision=0)
618
+ extract_nth_frame = gr.Number(label="Extract nth frame", value=da.extract_nth_frame, interactive=True, precision=0)
619
+ overwrite_extracted_frames = gr.Checkbox(label="Overwrite extracted frames", value=False, interactive=True)
620
+ use_mask_video = gr.Checkbox(label="Use mask video", value=False, interactive=True)
621
+ with gr.Row():
622
+ video_mask_path = gr.Textbox(label="Video mask path", lines=1, value = da.video_mask_path, interactive=True)
623
+ # MASK INIT INNER-TAB
624
+ with gr.Tab('Mask Init'):
625
+ with gr.Row():
626
+ use_mask = gr.Checkbox(label="Use mask", value=d.use_mask, interactive=True)
627
+ use_alpha_as_mask = gr.Checkbox(label="Use alpha as mask", value=d.use_alpha_as_mask, interactive=True)
628
+ invert_mask = gr.Checkbox(label="Invert mask", value=d.invert_mask, interactive=True)
629
+ overlay_mask = gr.Checkbox(label="Overlay mask", value=d.overlay_mask, interactive=True)
630
+ with gr.Row():
631
+ mask_file = gr.Textbox(label="Mask file", lines=1, interactive=True, value = d.mask_file)
632
+ with gr.Row():
633
+ mask_overlay_blur = gr.Slider(label="Mask overlay blur", minimum=0, maximum=64, step=1, value=d.mask_overlay_blur, interactive=True)
634
+ with gr.Row():
635
+ choice = mask_fill_choices[d.fill]
636
+ fill = gr.Radio(label='Mask fill', choices=mask_fill_choices, value=choice, type="index")
637
+ with gr.Row():
638
+ full_res_mask = gr.Checkbox(label="Full res mask", value=d.full_res_mask, interactive=True)
639
+ full_res_mask_padding = gr.Slider(minimum=0, maximum=512, step=1, label="Full res mask padding", value=d.full_res_mask_padding, interactive=True)
640
+ # PARSEQ ACCORD
641
+ with gr.Accordion('Parseq', open=False):
642
+ gr.HTML("""
643
+ Use an <a style='color:SteelBlue;' target='_blank' href='https://sd-parseq.web.app/deforum'>sd-parseq manifest</a> for your animation (leave blank to ignore).</p>
644
+ <p style="margin-top:1em">
645
+ Note that parseq overrides:
646
+ <ul style="list-style-type:circle; margin-left:2em; margin-bottom:1em">
647
+ <li>Run: seed, subseed, subseed strength.</li>
648
+ <li>Keyframes: generation settings (noise, strength, contrast, scale).</li>
649
+ <li>Keyframes: motion parameters for 2D and 3D (angle, zoom, translation, rotation, perspective flip).</li>
650
+ </ul>
651
+ </p>
652
+ <p>
653
+ Parseq does <strong><em>not</em></strong> override:
654
+ <ul style="list-style-type:circle; margin-left:2em; margin-bottom:1em">
655
+ <li>Run: Sampler, Width, Height, tiling, resize seed.</li>
656
+ <li>Keyframes: animation settings (animation mode, max frames, border) </li>
657
+ <li>Keyframes: coherence (color coherence & cadence) </li>
658
+ <li>Keyframes: depth warping</li>
659
+ <li>Output settings: all settings (including fps and max frames)</li>
660
+ </ul>
661
+ </p>
662
+ """)
663
+ with gr.Row():
664
+ parseq_manifest = gr.Textbox(label="Parseq Manifest (JSON or URL)", lines=4, value = dp.parseq_manifest, interactive=True)
665
+ with gr.Row():
666
+ parseq_use_deltas = gr.Checkbox(label="Use delta values for movement parameters", value=dp.parseq_use_deltas, interactive=True)
667
+ def show_hybrid_html_msg(choice):
668
+ if choice not in ['2D','3D']:
669
+ return gr.update(visible=True)
670
+ else:
671
+ return gr.update(visible=False)
672
+ def change_hybrid_tab_status(choice):
673
+ if choice in ['2D','3D']:
674
+ return gr.update(visible=True)
675
+ else:
676
+ return gr.update(visible=False)
677
+ # CONTROLNET TAB
678
+ with gr.Tab('ControlNet'):
679
+ gr.HTML("""
680
+ Requires the <a style='color:SteelBlue;' target='_blank' href='https://github.com/Mikubill/sd-webui-controlnet'>ControlNet</a> extension to be installed.</p>
681
+ <p style="margin-top:0.2em">
682
+ *Work In Progress*. All params below are going to be keyframable at some point. If you want to speed up the integration, join Deforum's development. &#128521;
683
+ </p>
684
+ <p>
685
+ Due to the way the base ControlNet extension works, its models need to be located at 'extensions/deforum-for-automatic1111-webui/models'. Copy, symlink or move them there until a more elegant solution is found. As of now, it also requires use_init to be checked for the first run. The ControlNet extension version used during development is a24089a62e70a7fae44b7bf35b51fd584dd55e25; if it still breaks even with all the options above, upgrade/downgrade your CN version to that one.
686
+ </p>
687
+ """)
688
+ controlnet_dict = setup_controlnet_ui()
689
+ # HYBRID VIDEO TAB
690
+ with gr.Tab('Hybrid Video'):
691
+ # this html only shows when not in 2d/3d mode
692
+ hybrid_msg_html = gr.HTML(value='Please, change animation mode to 2D or 3D to enable Hybrid Mode',visible=False, elem_id='hybrid_msg_html')
693
+ # HYBRID INFO ACCORD
694
+ with gr.Accordion("Info & Help", open=False):
695
+ hybrid_html = "<p style=\"padding-bottom:0\"><b style=\"text-shadow: blue -1px -1px;\">Hybrid Video Compositing in 2D/3D Mode</b><span style=\"color:#DDD;font-size:0.7rem;text-shadow: black -1px -1px;margin-left:10px;\">by <a href=\"https://github.com/reallybigname\">reallybigname</a></span></p>"
696
+ hybrid_html += "<ul style=\"list-style-type:circle; margin-left:1em; margin-bottom:1em;\"><li>Composite video with previous frame init image in <b>2D or 3D animation_mode</b> <i>(not for Video Input mode)</i></li>"
697
+ hybrid_html += "<li>Uses your <b>Init</b> settings for <b>video_init_path, extract_nth_frame, overwrite_extracted_frames</b></li>"
698
+ hybrid_html += "<li>In Keyframes tab, you can also set <b>color_coherence</b> = '<b>Video Input</b>'</li>"
699
+ hybrid_html += "<li><b>color_coherence_video_every_N_frames</b> lets you only match every N frames</li>"
700
+ hybrid_html += "<li>Color coherence may be used with hybrid composite off, to just use video color.</li>"
701
+ hybrid_html += "<li>Hybrid motion may be used with hybrid composite off, to just use video motion.</li></ul>"
702
+ hybrid_html += "Hybrid Video Schedules"
703
+ hybrid_html += "<ul style=\"list-style-type:circle; margin-left:1em; margin-bottom:1em;\"><li>The alpha schedule controls overall alpha for video mix, whether using a composite mask or not.</li>"
704
+ hybrid_html += "<li>The <b>hybrid_comp_mask_blend_alpha_schedule</b> only affects the 'Blend' <b>hybrid_comp_mask_type</b>.</li>"
705
+ hybrid_html += "<li>Mask contrast schedule is from 0-255. Normal is 1. Affects all masks.</li>"
706
+ hybrid_html += "<li>Autocontrast low/high cutoff schedules 0-100. Low 0 High 100 is full range. <br>(<i><b>hybrid_comp_mask_auto_contrast</b> must be enabled</i>)</li></ul>"
707
+ hybrid_html += "<a style='color:SteelBlue;' target='_blank' href='https://github.com/deforum-art/deforum-for-automatic1111-webui/wiki/Animation-Settings#hybrid-video-mode-for-2d3d-animations'>Click Here</a> for more info and a guide."
708
+ gr.HTML(hybrid_html)
709
+ # HYBRID SETTINGS ACCORD
710
+ with gr.Accordion("Hybrid Settings", open=True) as hybrid_settings_accord:
711
+ with gr.Row(variant='compact'):
712
+ with gr.Column(min_width=340):
713
+ with gr.Row(variant='compact'):
714
+ hybrid_generate_inputframes = gr.Checkbox(label="Generate inputframes", value=False, interactive=True)
715
+ hybrid_composite = gr.Checkbox(label="Hybrid composite", value=False, interactive=True)
716
+ with gr.Column(min_width=340) as hybrid_2nd_column:
717
+ with gr.Row(variant='compact'):
718
+ hybrid_use_first_frame_as_init_image = gr.Checkbox(label="First frame as init image", value=da.hybrid_use_first_frame_as_init_image, interactive=True, visible=False)
719
+ hybrid_motion_use_prev_img = gr.Checkbox(label="Motion use prev img", value=False, interactive=True, visible=False)
720
+ with gr.Row() as hybrid_flow_row:
721
+ with gr.Column(variant='compact'):
722
+ with gr.Row(variant='compact'):
723
+ hybrid_motion = gr.Radio(['None', 'Optical Flow', 'Perspective', 'Affine'], label="Hybrid motion", value=da.hybrid_motion, elem_id="hybrid_motion")
724
+ with gr.Column(variant='compact'):
725
+ with gr.Row(variant='compact'):
726
+ with gr.Column(scale=1):
727
+ hybrid_flow_method = gr.Radio(['DIS Medium', 'Farneback'], label="Flow method", value=da.hybrid_flow_method, elem_id="hybrid_flow_method", visible=False)
728
+ hybrid_comp_mask_type = gr.Radio(['None', 'Depth', 'Video Depth', 'Blend', 'Difference'], label="Comp mask type", value=da.hybrid_comp_mask_type, elem_id="hybrid_comp_mask_type", visible=False)
729
+ with gr.Row(visible=False, variant='compact') as hybrid_comp_mask_row:
730
+ hybrid_comp_mask_equalize = gr.Radio(['None', 'Before', 'After', 'Both'], label="Comp mask equalize", value=da.hybrid_comp_mask_equalize, elem_id="hybrid_comp_mask_equalize")
731
+ with gr.Column(variant='compact'):
732
+ hybrid_comp_mask_auto_contrast = gr.Checkbox(label="Comp mask auto contrast", value=False, interactive=True)
733
+ hybrid_comp_mask_inverse = gr.Checkbox(label="Comp mask inverse", value=False, interactive=True)
734
+ with gr.Row(variant='compact'):
735
+ hybrid_comp_save_extra_frames = gr.Checkbox(label="Comp save extra frames", value=False, interactive=True)
736
+ # HYBRID SCHEDULES ACCORD
737
+ with gr.Accordion("Hybrid Schedules", open=False, visible=False) as hybrid_sch_accord:
738
+ with gr.Row(variant='compact') as hybrid_comp_alpha_schedule_row:
739
+ hybrid_comp_alpha_schedule = gr.Textbox(label="Comp alpha schedule", lines=1, value = da.hybrid_comp_alpha_schedule, interactive=True)
740
+ with gr.Row(variant='compact', visible=False) as hybrid_comp_mask_blend_alpha_schedule_row:
741
+ hybrid_comp_mask_blend_alpha_schedule = gr.Textbox(label="Comp mask blend alpha schedule", lines=1, value = da.hybrid_comp_mask_blend_alpha_schedule, interactive=True, elem_id="hybridelemtest")
742
+ with gr.Row(variant='compact', visible=False) as hybrid_comp_mask_contrast_schedule_row:
743
+ hybrid_comp_mask_contrast_schedule = gr.Textbox(label="Comp mask contrast schedule", lines=1, value = da.hybrid_comp_mask_contrast_schedule, interactive=True)
744
+ with gr.Row(variant='compact', visible=False) as hybrid_comp_mask_auto_contrast_cutoff_high_schedule_row :
745
+ hybrid_comp_mask_auto_contrast_cutoff_high_schedule = gr.Textbox(label="Comp mask auto contrast cutoff high schedule", lines=1, value = da.hybrid_comp_mask_auto_contrast_cutoff_high_schedule, interactive=True)
746
+ with gr.Row(variant='compact', visible=False) as hybrid_comp_mask_auto_contrast_cutoff_low_schedule_row:
747
+ hybrid_comp_mask_auto_contrast_cutoff_low_schedule = gr.Textbox(label="Comp mask auto contrast cutoff low schedule", lines=1, value = da.hybrid_comp_mask_auto_contrast_cutoff_low_schedule, interactive=True)
748
+ # HUMANS MASKING ACCORD
749
+ with gr.Accordion("Humans Masking", open=False, visible=False) as humans_masking_accord:
750
+ with gr.Row(variant='compact'):
751
+ hybrid_generate_human_masks = gr.Radio(['None', 'PNGs', 'Video', 'Both'], label="Generate human masks", value=da.hybrid_generate_human_masks, elem_id="hybrid_generate_human_masks")
752
+ # OUTPUT TAB
753
+ with gr.Tab('Output'):
754
+ # VID OUTPUT ACCORD
755
+ with gr.Accordion('Video Output Settings', open=True):
756
+ with gr.Row(variant='compact') as fps_out_format_row:
757
+ fps = gr.Slider(label="FPS", value=dv.fps, minimum=1, maximum=240, step=1)
758
+ # NOT VISIBLE AS OF 11-02-23 moving to ffmpeg-only!
759
+ output_format = gr.Dropdown(visible=False, label="Output format", choices=['FFMPEG mp4'], value='FFMPEG mp4', type="value", elem_id="output_format", interactive=True)
760
+ with gr.Column(variant='compact'):
761
+ with gr.Row(variant='compact') as soundtrack_row:
762
+ add_soundtrack = gr.Radio(['None', 'File', 'Init Video'], label="Add soundtrack", value=dv.add_soundtrack)
763
+ soundtrack_path = gr.Textbox(label="Soundtrack path", lines=1, interactive=True, value = dv.soundtrack_path)
764
+ with gr.Row(variant='compact'):
765
+ skip_video_for_run_all = gr.Checkbox(label="Skip video for run all", value=dv.skip_video_for_run_all, interactive=True)
766
+ store_frames_in_ram = gr.Checkbox(label="Store frames in ram", value=dv.store_frames_in_ram, interactive=True)
767
+ save_depth_maps = gr.Checkbox(label="Save depth maps", value=da.save_depth_maps, interactive=True)
768
+ # the following param only shows for windows and linux users!
769
+ make_gif = gr.Checkbox(label="Make GIF", value=dv.make_gif, interactive=True)
770
+ with gr.Row(equal_height=True, variant='compact', visible=(True if dr.current_user_os in ["Windows", "Linux", "Mac"] else False)) as r_upscale_row:
771
+ r_upscale_video = gr.Checkbox(label="Upscale", value=dv.r_upscale_video, interactive=True)
772
+ r_upscale_model = gr.Dropdown(label="Upscale model", choices=['realesr-animevideov3', 'realesrgan-x4plus', 'realesrgan-x4plus-anime'], interactive=True, value = dv.r_upscale_model, type="value")
773
+ r_upscale_factor = gr.Dropdown(choices=['x2', 'x3', 'x4'], label="Upscale factor", interactive=True, value=dv.r_upscale_factor, type="value")
774
+ r_upscale_keep_imgs = gr.Checkbox(label="Keep Imgs", value=dv.r_upscale_keep_imgs, interactive=True)
775
+ with gr.Accordion('FFmpeg settings', visible=True, open=False) as ffmpeg_quality_accordion:
776
+ with gr.Row(equal_height=True, variant='compact', visible=True) as ffmpeg_set_row:
777
+ ffmpeg_crf = gr.Slider(minimum=0, maximum=51, step=1, label="CRF", value=dv.ffmpeg_crf, interactive=True)
778
+ ffmpeg_preset = gr.Dropdown(label="Preset", choices=['veryslow', 'slower', 'slow', 'medium', 'fast', 'faster', 'veryfast', 'superfast', 'ultrafast'], interactive=True, value = dv.ffmpeg_preset, type="value")
779
+ with gr.Row(equal_height=True, variant='compact', visible=True) as ffmpeg_location_row:
780
+ ffmpeg_location = gr.Textbox(label="Location", lines=1, interactive=True, value = dv.ffmpeg_location)
781
+ # FRAME INTERPOLATION TAB
782
+ with gr.Tab('Frame Interpolation') as frame_interp_tab:
783
+ with gr.Accordion('Important notes and Help', open=False):
784
+ gr.HTML("""
785
+ Use <a href="https://github.com/megvii-research/ECCV2022-RIFE">RIFE</a> / <a href="https://film-net.github.io/">FILM</a> Frame Interpolation to smooth out and/or slow down any video.</p>
786
+ <p style="margin-top:1em">
787
+ Supported engines:
788
+ <ul style="list-style-type:circle; margin-left:1em; margin-bottom:1em">
789
+ <li>RIFE v4.6 and FILM.</li>
790
+ </ul>
791
+ </p>
792
+ <p style="margin-top:1em">
793
+ Important notes:
794
+ <ul style="list-style-type:circle; margin-left:1em; margin-bottom:1em">
795
+ <li>Frame Interpolation will *not* run if any of the following are enabled: 'Store frames in ram' / 'Skip video for run all'.</li>
796
+ <li>Audio (if provided) will *not* be transferred to the interpolated video if Slow-Mo is enabled.</li>
797
+ <li>'add_soundtrack' and 'soundtrack_path' aren't being honoured in "Interpolate an existing video" mode. Original vid audio will be used instead with the same slow-mo rules above.</li>
798
+ </ul>
799
+ </p>
800
+ """)
801
+ with gr.Column(variant='compact'):
802
+ with gr.Row(variant='compact'):
803
+ # Interpolation Engine
804
+ frame_interpolation_engine = gr.Dropdown(label="Engine", choices=['None','RIFE v4.6','FILM'], value=dv.frame_interpolation_engine, type="value", elem_id="frame_interpolation_engine", interactive=True)
805
+ frame_interpolation_slow_mo_enabled = gr.Checkbox(label="Slow Mo", elem_id="frame_interpolation_slow_mo_enabled", value=dv.frame_interpolation_slow_mo_enabled, interactive=True, visible=False)
806
+ # If this is set to True, we keep all of the interpolated frames in a folder. Default is False - means we delete them at the end of the run
807
+ frame_interpolation_keep_imgs = gr.Checkbox(label="Keep Imgs", elem_id="frame_interpolation_keep_imgs", value=dv.frame_interpolation_keep_imgs, interactive=True, visible=False)
808
+ with gr.Row(variant='compact', visible=False) as frame_interp_amounts_row:
809
+ with gr.Column(min_width=180) as frame_interp_x_amount_column:
810
+ # How many times to interpolate (interp X)
811
+ frame_interpolation_x_amount = gr.Slider(minimum=2, maximum=10, step=1, label="Interp X", value=dv.frame_interpolation_x_amount, interactive=True)
812
+ with gr.Column(min_width=180, visible=False) as frame_interp_slow_mo_amount_column:
813
+ # Interp Slow-Mo (setting final output fps, not really doing anything directly with RIFE/FILM)
814
+ frame_interpolation_slow_mo_amount = gr.Slider(minimum=2, maximum=10, step=1, label="Slow-Mo X", value=dv.frame_interpolation_x_amount, interactive=True)
815
+ # TODO: move these from here when done
816
+ def hide_slow_mo(choice):
817
+ return gr.update(visible=True) if choice else gr.update(visible=False)
818
+ def hide_interp_by_interp_status(choice):
819
+ return gr.update(visible=False) if choice == 'None' else gr.update(visible=True)
820
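+ # FILM supports much higher multipliers than RIFE, so raise the slider cap to 300 for FILM
+ # and clamp it back to 10 (resetting out-of-range values) for any other engine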
+ def change_interp_x_max_limit(engine_name, current_value):
821
+ if engine_name == 'FILM':
822
+ return gr.update(maximum=300)
823
+ elif current_value > 10:
824
+ return gr.update(maximum=10, value=2)
825
+ return gr.update(maximum=10)
826
+ frame_interpolation_slow_mo_enabled.change(fn=hide_slow_mo,inputs=frame_interpolation_slow_mo_enabled,outputs=frame_interp_slow_mo_amount_column)
827
+ interp_hide_list = [frame_interpolation_slow_mo_enabled,frame_interpolation_keep_imgs,frame_interp_amounts_row]
828
+ for output in interp_hide_list:
829
+ frame_interpolation_engine.change(fn=hide_interp_by_interp_status,inputs=frame_interpolation_engine,outputs=output)
830
+ frame_interpolation_engine.change(fn=change_interp_x_max_limit,inputs=[frame_interpolation_engine,frame_interpolation_x_amount],outputs=frame_interpolation_x_amount)
831
+ with gr.Row(visible=False) as interp_existing_video_row:
832
+ # Interpolate any existing video from the connected PC
833
+ with gr.Accordion('Interpolate an existing video', open=False) as interp_existing_video_accord:
834
+ # A drag-n-drop UI box to which the user uploads a *single* (at this stage) video
835
+ vid_to_interpolate_chosen_file = gr.File(label="Video to Interpolate", interactive=True, file_count="single", file_types=["video"], elem_id="vid_to_interpolate_chosen_file")
836
+ with gr.Row(variant='compact'):
837
+ # Non interactive textbox showing uploaded input vid total Frame Count
838
+ in_vid_frame_count_window = gr.Textbox(label="In Frame Count", lines=1, interactive=False, value='---')
839
+ # Non interactive textbox showing uploaded input vid FPS
840
+ in_vid_fps_ui_window = gr.Textbox(label="In FPS", lines=1, interactive=False, value='---')
841
+ # Non interactive textbox showing expected output interpolated video FPS
842
+ out_interp_vid_estimated_fps = gr.Textbox(label="Interpolated Vid FPS", value='---')
843
+ # This is the actual button that's pressed to initiate the interpolation:
844
+ interpolate_button = gr.Button(value="*Interpolate uploaded video*")
845
+ # Show a text about CLI outputs:
846
+ gr.HTML("* check your CLI for outputs")
847
+ # make the function call when the interpolation button is clicked
848
+ interpolate_button.click(upload_vid_to_interpolate,inputs=[vid_to_interpolate_chosen_file, frame_interpolation_engine, frame_interpolation_x_amount, frame_interpolation_slow_mo_enabled, frame_interpolation_slow_mo_amount, frame_interpolation_keep_imgs, ffmpeg_location, ffmpeg_crf, ffmpeg_preset, in_vid_fps_ui_window])
849
+ [change_fn.change(set_interp_out_fps, inputs=[frame_interpolation_x_amount, frame_interpolation_slow_mo_enabled, frame_interpolation_slow_mo_amount, in_vid_fps_ui_window], outputs=out_interp_vid_estimated_fps) for change_fn in [frame_interpolation_x_amount, frame_interpolation_slow_mo_amount, frame_interpolation_slow_mo_enabled]]
850
+ # Populate the above FPS and FCount values as soon as a video is uploaded to the FileUploadBox (vid_to_interpolate_chosen_file)
851
+ vid_to_interpolate_chosen_file.change(gradio_f_interp_get_fps_and_fcount,inputs=[vid_to_interpolate_chosen_file, frame_interpolation_x_amount, frame_interpolation_slow_mo_enabled, frame_interpolation_slow_mo_amount],outputs=[in_vid_fps_ui_window,in_vid_frame_count_window, out_interp_vid_estimated_fps])
852
+ #TODO: move this from here
853
+ interp_hide_list = [frame_interpolation_slow_mo_enabled,frame_interpolation_keep_imgs,frame_interp_amounts_row,interp_existing_video_row]
854
+ for output in interp_hide_list:
855
+ frame_interpolation_engine.change(fn=hide_interp_by_interp_status,inputs=frame_interpolation_engine,outputs=output)
856
+ # TODO: add upscalers parameters to the settings and make them a part of the pipeline
857
+ # VIDEO UPSCALE TAB
858
+ with gr.Tab('Video Upscaling'):
859
+ vid_to_upscale_chosen_file = gr.File(label="Video to Upscale", interactive=True, file_count="single", file_types=["video"], elem_id="vid_to_upscale_chosen_file")
860
+ with gr.Column():
861
+ # NCNN UPSCALE TAB
862
+ with gr.Tab('Upscale V2') as ncnn_upscale_tab:
863
+ with gr.Row(variant='compact') as ncnn_upload_vid_stats_row:
864
+ # Non interactive textbox showing uploaded input vid total Frame Count
865
+ ncnn_upscale_in_vid_frame_count_window = gr.Textbox(label="In Frame Count", lines=1, interactive=False, value='---')
866
+ # Non interactive textbox showing uploaded input vid FPS
867
+ ncnn_upscale_in_vid_fps_ui_window = gr.Textbox(label="In FPS", lines=1, interactive=False, value='---')
868
+ # Non interactive textbox showing uploaded input resolution
869
+ ncnn_upscale_in_vid_res = gr.Textbox(label="In Res", lines=1, interactive=False, value='---')
870
+ # Non interactive textbox showing expected output resolution
871
+ ncnn_upscale_out_vid_res = gr.Textbox(label="Out Res", value='---')
872
+ with gr.Column():
873
+ with gr.Row(variant='compact', visible=(True if dr.current_user_os in ["Windows", "Linux", "Mac"] else False)) as ncnn_actual_upscale_row:
874
+ ncnn_upscale_model = gr.Dropdown(label="Upscale model", choices=['realesr-animevideov3', 'realesrgan-x4plus', 'realesrgan-x4plus-anime'], interactive=True, value = "realesr-animevideov3", type="value")
875
+ ncnn_upscale_factor = gr.Dropdown(choices=['x2', 'x3', 'x4'], label="Upscale factor", interactive=True, value="x2", type="value")
876
+ ncnn_upscale_keep_imgs = gr.Checkbox(label="Keep Imgs", value=True, interactive=True) # fix value
877
+ ncnn_upscale_btn = gr.Button(value="*Upscale uploaded video*")
878
+ ncnn_upscale_btn.click(ncnn_upload_vid_to_upscale,inputs=[vid_to_upscale_chosen_file, ncnn_upscale_in_vid_fps_ui_window, ncnn_upscale_in_vid_res, ncnn_upscale_out_vid_res, ncnn_upscale_model, ncnn_upscale_factor, ncnn_upscale_keep_imgs, ffmpeg_location, ffmpeg_crf, ffmpeg_preset])
879
+ with gr.Tab('Upscale V1'):
880
+ with gr.Column():
881
+ selected_tab = gr.State(value=0)
882
+ with gr.Tabs(elem_id="extras_resize_mode"):
883
+ with gr.TabItem('Scale by', elem_id="extras_scale_by_tab") as tab_scale_by:
884
+ upscaling_resize = gr.Slider(minimum=1.0, maximum=8.0, step=0.05, label="Resize", value=2, elem_id="extras_upscaling_resize")
885
+ with gr.TabItem('Scale to', elem_id="extras_scale_to_tab") as tab_scale_to:
886
+ with FormRow():
887
+ upscaling_resize_w = gr.Slider(label="Width", minimum=1, maximum=7680, step=1, value=512, elem_id="extras_upscaling_resize_w")
888
+ upscaling_resize_h = gr.Slider(label="Height", minimum=1, maximum=7680, step=1, value=512, elem_id="extras_upscaling_resize_h")
889
+ upscaling_crop = gr.Checkbox(label='Crop to fit', value=True, elem_id="extras_upscaling_crop")
890
+ with FormRow():
891
+ extras_upscaler_1 = gr.Dropdown(label='Upscaler 1', elem_id="extras_upscaler_1", choices=[x.name for x in sh.sd_upscalers], value=sh.sd_upscalers[3].name)
892
+ extras_upscaler_2 = gr.Dropdown(label='Upscaler 2', elem_id="extras_upscaler_2", choices=[x.name for x in sh.sd_upscalers], value=sh.sd_upscalers[0].name)
893
+ with FormRow():
894
+ with gr.Column(scale=3):
895
+ extras_upscaler_2_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Upscaler 2 visibility", value=0.0, elem_id="extras_upscaler_2_visibility")
896
+ with gr.Column(scale=1, min_width=80):
897
+ upscale_keep_imgs = gr.Checkbox(label="Keep Imgs", elem_id="upscale_keep_imgs", value=True, interactive=True)
898
+ tab_scale_by.select(fn=lambda: 0, inputs=[], outputs=[selected_tab])
899
+ tab_scale_to.select(fn=lambda: 1, inputs=[], outputs=[selected_tab])
900
+ # This is the actual button that's pressed to initiate the Upscaling:
901
+ upscale_btn = gr.Button(value="*Upscale uploaded video*")
902
+ # Show a text about CLI outputs:
903
+ gr.HTML("* check your CLI for outputs")
904
+ # make the function call when the UPSCALE button is clicked
905
+ upscale_btn.click(upload_vid_to_upscale,inputs=[vid_to_upscale_chosen_file, selected_tab, upscaling_resize, upscaling_resize_w, upscaling_resize_h, upscaling_crop, extras_upscaler_1, extras_upscaler_2, extras_upscaler_2_visibility, upscale_keep_imgs, ffmpeg_location, ffmpeg_crf, ffmpeg_preset])
906
+ # STITCH FRAMES TO VID TAB
907
+ with gr.Tab('Frames to Video') as stitch_imgs_to_vid_row:
908
+ with gr.Row(visible=False):
909
+ path_name_modifier = gr.Dropdown(label="Path name modifier", choices=['x0_pred', 'x'], value=dv.path_name_modifier, type="value", elem_id="path_name_modifier", interactive=True, visible=False)
910
+ gr.HTML("""
911
+ <p style="margin-top:0em">
912
+ Important Notes:
913
+ <ul style="list-style-type:circle; margin-left:1em; margin-bottom:0.25em">
914
+ <li>Enter a path relative to the webui folder or a full absolute path, and make sure it ends with something like '20230124234916_%05d.png' (just replace 20230124234916 with your batch ID). The %05d part is important, don't forget it!</li>
915
+ </ul>
916
+ """)
917
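+ # e.g. 'outputs/img2img-images/Deforum/20230124234916_%05d.png' (folder and batch ID here are only an example)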
+ with gr.Row(variant='compact'):
918
+ image_path = gr.Textbox(label="Image path", lines=1, interactive=True, value = dv.image_path)
919
+ with gr.Row(visible=False):
920
+ mp4_path = gr.Textbox(label="MP4 path", lines=1, interactive=True, value = dv.mp4_path)
921
+ # not visible as of 06-02-23 since render_steps is disabled as well and they work together. Need to fix both.
922
+ with gr.Row(visible=False):
923
+ # rend_step Never worked - set to visible false 28-1-23 # MOVE OUT FROM HERE!
924
+ render_steps = gr.Checkbox(label="Render steps", value=dv.render_steps, interactive=True, visible=False)
925
+ ffmpeg_stitch_imgs_but = gr.Button(value="*Stitch frames to video*")
926
+ ffmpeg_stitch_imgs_but.click(direct_stitch_vid_from_frames,inputs=[image_path, fps, ffmpeg_location, ffmpeg_crf, ffmpeg_preset, add_soundtrack, soundtrack_path])
927
+ # **OLD + NON ACTIVES AREA**
928
+ with gr.Accordion(visible=False, label='INVISIBLE') as not_in_use_accordion:
929
+ # NOT VISIBLE AS OF 09-02-23
930
+ mask_contrast_adjust = gr.Slider(label="Mask contrast adjust", minimum=0, maximum=1, step=0.01, value=d.mask_contrast_adjust, interactive=True)
931
+ mask_brightness_adjust = gr.Slider(label="Mask brightness adjust", minimum=0, maximum=1, step=0.01, value=d.mask_brightness_adjust, interactive=True)
932
+ from_img2img_instead_of_link = gr.Checkbox(label="from_img2img_instead_of_link", value=False, interactive=False, visible=False)
933
+ # INVISIBLE AS OF 08-02 (with static value of 8 for both W and H). Was in Perlin section before Perlin Octaves/Persistence
934
+ with gr.Column(min_width=200, visible=False):
935
+ perlin_w = gr.Slider(label="Perlin W", minimum=0.1, maximum=16, step=0.1, value=da.perlin_w, interactive=True)
936
+ perlin_h = gr.Slider(label="Perlin H", minimum=0.1, maximum=16, step=0.1, value=da.perlin_h, interactive=True)
937
+ with gr.Row(visible=False):
938
+ filename_format = gr.Textbox(label="Filename format", lines=1, interactive=True, value = d.filename_format, visible=False)
939
+ with gr.Row(visible=False):
940
+ save_settings = gr.Checkbox(label="save_settings", value=d.save_settings, interactive=True)
941
+ with gr.Row(visible=False):
942
+ save_samples = gr.Checkbox(label="save_samples", value=d.save_samples, interactive=True)
943
+ display_samples = gr.Checkbox(label="display_samples", value=False, interactive=False)
944
+ # NOT VISIBLE 11-02-23 htai
945
+ with gr.Accordion('Subseed controls & More', open=False, visible=False):
946
+ # Not visible until fixed, 06-02-23
947
+ # NOT VISIBLE as of 11-02 - we have sch now. will delete the actual params in a later date
948
+ with gr.Row(variant='compact', visible=False):
949
+ seed_enable_extras = gr.Checkbox(label="Enable subseed controls", value=False)
950
+ n_batch = gr.Number(label="N Batch", value=d.n_batch, interactive=True, precision=0, visible=False)
951
+ with gr.Row(visible=False):
952
+ save_sample_per_step = gr.Checkbox(label="Save sample per step", value=d.save_sample_per_step, interactive=True)
953
+ show_sample_per_step = gr.Checkbox(label="Show sample per step", value=d.show_sample_per_step, interactive=True)
954
+ # Gradio's Change functions - hiding and renaming elements based on other elements
955
+ fps.change(fn=change_gif_button_visibility, inputs=fps, outputs=make_gif)
956
+ r_upscale_model.change(fn=update_r_upscale_factor, inputs=r_upscale_model, outputs=r_upscale_factor)
957
+ ncnn_upscale_model.change(fn=update_r_upscale_factor, inputs=ncnn_upscale_model, outputs=ncnn_upscale_factor)
958
+ ncnn_upscale_model.change(update_upscale_out_res_by_model_name, inputs=[ncnn_upscale_in_vid_res, ncnn_upscale_model], outputs=ncnn_upscale_out_vid_res)
959
+ ncnn_upscale_factor.change(update_upscale_out_res, inputs=[ncnn_upscale_in_vid_res, ncnn_upscale_factor], outputs=ncnn_upscale_out_vid_res)
960
+ vid_to_upscale_chosen_file.change(vid_upscale_gradio_update_stats,inputs=[vid_to_upscale_chosen_file, ncnn_upscale_factor],outputs=[ncnn_upscale_in_vid_fps_ui_window, ncnn_upscale_in_vid_frame_count_window, ncnn_upscale_in_vid_res, ncnn_upscale_out_vid_res])
961
+ animation_mode.change(fn=change_max_frames_visibility, inputs=animation_mode, outputs=max_frames)
962
+ animation_mode.change(fn=change_diffusion_cadence_visibility, inputs=animation_mode, outputs=diffusion_cadence)
963
+ animation_mode.change(fn=disble_3d_related_stuff, inputs=animation_mode, outputs=depth_3d_warping_accord)
964
+ animation_mode.change(fn=disble_3d_related_stuff, inputs=animation_mode, outputs=fov_accord)
965
+ animation_mode.change(fn=disble_3d_related_stuff, inputs=animation_mode, outputs=only_3d_motion_column)
966
+ animation_mode.change(fn=enable_2d_related_stuff, inputs=animation_mode, outputs=only_2d_motion_column)
967
+ animation_mode.change(fn=disable_by_interpolation, inputs=animation_mode, outputs=force_grayscale_column)
968
+ animation_mode.change(fn=disable_pers_flip_accord, inputs=animation_mode, outputs=perspective_flip_accord)
969
+ animation_mode.change(fn=disable_pers_flip_accord, inputs=animation_mode, outputs=both_anim_mode_motion_params_column)
970
+ #Hybrid related:
971
+ animation_mode.change(fn=show_hybrid_html_msg, inputs=animation_mode, outputs=hybrid_msg_html)
972
+ animation_mode.change(fn=change_hybrid_tab_status, inputs=animation_mode, outputs=hybrid_sch_accord)
973
+ animation_mode.change(fn=change_hybrid_tab_status, inputs=animation_mode, outputs=hybrid_settings_accord)
974
+ animation_mode.change(fn=change_hybrid_tab_status, inputs=animation_mode, outputs=humans_masking_accord)
975
+ hybrid_comp_mask_type.change(fn=change_comp_mask_x_visibility, inputs=hybrid_comp_mask_type, outputs=hybrid_comp_mask_row)
976
+ hybrid_motion.change(fn=disable_by_non_optical_flow, inputs=hybrid_motion, outputs=hybrid_flow_method)
977
+ hybrid_motion.change(fn=disable_by_comp_mask, inputs=hybrid_motion, outputs=hybrid_motion_use_prev_img)
978
+ hybrid_composite.change(fn=disable_by_hybrid_composite_dynamic, inputs=[hybrid_composite, hybrid_comp_mask_type], outputs=hybrid_comp_mask_row)
979
+ hybrid_composite_outputs = [humans_masking_accord, hybrid_sch_accord, hybrid_comp_mask_type, hybrid_use_first_frame_as_init_image]
980
+ for output in hybrid_composite_outputs:
981
+ hybrid_composite.change(fn=disable_by_hybrid_composite, inputs=hybrid_composite, outputs=output)
982
+ hybrid_comp_mask_type_outputs = [hybrid_comp_mask_blend_alpha_schedule_row, hybrid_comp_mask_contrast_schedule_row, hybrid_comp_mask_auto_contrast_cutoff_high_schedule_row, hybrid_comp_mask_auto_contrast_cutoff_low_schedule_row]
983
+ for output in hybrid_comp_mask_type_outputs:
984
+ hybrid_comp_mask_type.change(fn=disable_by_comp_mask, inputs=hybrid_comp_mask_type, outputs=output)
985
+ # End of hybrid related
986
+ seed_behavior.change(fn=change_seed_iter_visibility, inputs=seed_behavior, outputs=seed_iter_N_row)
987
+ seed_behavior.change(fn=change_seed_schedule_visibility, inputs=seed_behavior, outputs=seed_schedule_row)
988
+ color_coherence.change(fn=change_color_coherence_video_every_N_frames_visibility, inputs=color_coherence, outputs=color_coherence_video_every_N_frames_row)
989
+ noise_type.change(fn=change_perlin_visibility, inputs=noise_type, outputs=perlin_row)
990
+ skip_video_for_run_all_outputs = [fps_out_format_row, soundtrack_row, ffmpeg_quality_accordion, store_frames_in_ram, make_gif, r_upscale_row]
991
+ for output in skip_video_for_run_all_outputs:
992
+ skip_video_for_run_all.change(fn=change_visibility_from_skip_video, inputs=skip_video_for_run_all, outputs=output)
993
+ # END OF UI TABS
994
+ stuff = locals()
995
+ stuff = {**stuff, **controlnet_dict}
996
+ stuff.pop('controlnet_dict')
997
+ return stuff
998
+
999
+ ### SETTINGS STORAGE UPDATE! 2023-01-27
1000
+ ### To Reduce The Number Of Settings Overrides,
1001
+ ### They Are Being Passed As Dictionaries
1002
+ ### It Would Have Been Also Nice To Retrieve Them
1003
+ ### From Functions Like Deforumoutputargs(),
1004
+ ### But Over Time There Was Some Cross-Polination,
1005
+ ### So They Are Now Hardcoded As 'List'-Strings Below
1006
+ ### If you're adding a new setting, add it to one of the lists
1007
+ ### besides writing it in the setup functions above
1008
+
1009
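+ # Each raw-string block below is flattened into a plain list of names:
+ # the replace() calls strip newlines and spaces, and split(',') yields e.g. ['animation_mode', 'max_frames', 'border', ...]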
+ anim_args_names = str(r'''animation_mode, max_frames, border,
1010
+ angle, zoom, translation_x, translation_y, translation_z,
1011
+ rotation_3d_x, rotation_3d_y, rotation_3d_z,
1012
+ enable_perspective_flip,
1013
+ perspective_flip_theta, perspective_flip_phi, perspective_flip_gamma, perspective_flip_fv,
1014
+ noise_schedule, strength_schedule, contrast_schedule, cfg_scale_schedule, pix2pix_img_cfg_scale_schedule,
1015
+ enable_subseed_scheduling, subseed_schedule, subseed_strength_schedule,
1016
+ enable_steps_scheduling, steps_schedule,
1017
+ fov_schedule, near_schedule, far_schedule,
1018
+ seed_schedule,
1019
+ enable_sampler_scheduling, sampler_schedule,
1020
+ mask_schedule, use_noise_mask, noise_mask_schedule,
1021
+ enable_checkpoint_scheduling, checkpoint_schedule,
1022
+ enable_clipskip_scheduling, clipskip_schedule,
1023
+ kernel_schedule, sigma_schedule, amount_schedule, threshold_schedule,
1024
+ color_coherence, color_coherence_video_every_N_frames, color_force_grayscale,
1025
+ diffusion_cadence,
1026
+ noise_type, perlin_w, perlin_h, perlin_octaves, perlin_persistence,
1027
+ use_depth_warping, midas_weight,
1028
+ padding_mode, sampling_mode, save_depth_maps,
1029
+ video_init_path, extract_nth_frame, extract_from_frame, extract_to_frame, overwrite_extracted_frames,
1030
+ use_mask_video, video_mask_path,
1031
+ resume_from_timestring, resume_timestring'''
1032
+ ).replace("\n", "").replace("\r", "").replace(" ", "").split(',')
1033
+ hybrid_args_names = str(r'''hybrid_generate_inputframes, hybrid_generate_human_masks, hybrid_use_first_frame_as_init_image,
1034
+ hybrid_motion, hybrid_motion_use_prev_img, hybrid_flow_method, hybrid_composite, hybrid_comp_mask_type, hybrid_comp_mask_inverse,
1035
+ hybrid_comp_mask_equalize, hybrid_comp_mask_auto_contrast, hybrid_comp_save_extra_frames,
1036
+ hybrid_comp_alpha_schedule, hybrid_comp_mask_blend_alpha_schedule, hybrid_comp_mask_contrast_schedule,
1037
+ hybrid_comp_mask_auto_contrast_cutoff_high_schedule, hybrid_comp_mask_auto_contrast_cutoff_low_schedule'''
1038
+ ).replace("\n", "").replace("\r", "").replace(" ", "").split(',')
1039
+ args_names = str(r'''W, H, tiling, restore_faces,
1040
+ seed, sampler,
1041
+ seed_enable_extras, seed_resize_from_w, seed_resize_from_h,
1042
+ steps, ddim_eta,
1043
+ n_batch,
1044
+ save_settings, save_samples, display_samples,
1045
+ save_sample_per_step, show_sample_per_step,
1046
+ batch_name, filename_format,
1047
+ seed_behavior, seed_iter_N,
1048
+ use_init, from_img2img_instead_of_link, strength_0_no_init, strength, init_image,
1049
+ use_mask, use_alpha_as_mask, invert_mask, overlay_mask,
1050
+ mask_file, mask_contrast_adjust, mask_brightness_adjust, mask_overlay_blur,
1051
+ fill, full_res_mask, full_res_mask_padding,
1052
+ reroll_blank_frames'''
1053
+ ).replace("\n", "").replace("\r", "").replace(" ", "").split(',')
1054
+ video_args_names = str(r'''skip_video_for_run_all,
1055
+ fps, make_gif, output_format, ffmpeg_location, ffmpeg_crf, ffmpeg_preset,
1056
+ add_soundtrack, soundtrack_path,
1057
+ r_upscale_video, r_upscale_model, r_upscale_factor, r_upscale_keep_imgs,
1058
+ render_steps,
1059
+ path_name_modifier, image_path, mp4_path, store_frames_in_ram,
1060
+ frame_interpolation_engine, frame_interpolation_x_amount, frame_interpolation_slow_mo_enabled, frame_interpolation_slow_mo_amount,
1061
+ frame_interpolation_keep_imgs'''
1062
+ ).replace("\n", "").replace("\r", "").replace(" ", "").split(',')
1063
+ parseq_args_names = str(r'''parseq_manifest, parseq_use_deltas'''
1064
+ ).replace("\n", "").replace("\r", "").replace(" ", "").split(',')
1065
+ loop_args_names = str(r'''use_looper, init_images, image_strength_schedule, blendFactorMax, blendFactorSlope,
1066
+ tweening_frames_schedule, color_correction_factor'''
1067
+ ).replace("\n", "").replace("\r", "").replace(" ", "").split(',')
1068
+
1069
+ component_names = ['override_settings_with_file', 'custom_settings_file'] + anim_args_names +['animation_prompts', 'animation_prompts_positive', 'animation_prompts_negative'] + args_names + video_args_names + parseq_args_names + hybrid_args_names + loop_args_names + controlnet_component_names()
1070
+ settings_component_names = [name for name in component_names if name not in video_args_names]
1071
+
1072
+ def setup_deforum_setting_ui(self, is_img2img, is_extension = True):
1073
+ ds = setup_deforum_setting_dictionary(self, is_img2img, is_extension)
1074
+ return [ds[name] for name in (['btn'] + component_names)]
1075
+
1076
+ def pack_anim_args(args_dict):
1077
+ return {name: args_dict[name] for name in (anim_args_names + hybrid_args_names)}
1078
+
1079
+ def pack_args(args_dict):
1080
+ args_dict = {name: args_dict[name] for name in args_names}
1081
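+ # fixed values that are not exposed in the UI but are expected further down the Deforum pipeline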
+ args_dict['precision'] = 'autocast'
1082
+ args_dict['scale'] = 7
1083
+ args_dict['subseed'] = -1
1084
+ args_dict['subseed_strength'] = 0
1085
+ args_dict['C'] = 4
1086
+ args_dict['f'] = 8
1087
+ args_dict['timestring'] = ""
1088
+ args_dict['init_latent'] = None
1089
+ args_dict['init_sample'] = None
1090
+ args_dict['init_c'] = None
1091
+ args_dict['noise_mask'] = None
1092
+ args_dict['seed_internal'] = 0
1093
+ return args_dict
1094
+
1095
+ def pack_video_args(args_dict):
1096
+ return {name: args_dict[name] for name in video_args_names}
1097
+
1098
+ def pack_parseq_args(args_dict):
1099
+ return {name: args_dict[name] for name in parseq_args_names}
1100
+
1101
+ def pack_loop_args(args_dict):
1102
+ return {name: args_dict[name] for name in loop_args_names}
1103
+
1104
+ def pack_controlnet_args(args_dict):
1105
+ return {name: args_dict[name] for name in controlnet_component_names()}
1106
+
1107
+ def process_args(args_dict_main):
1108
+ override_settings_with_file = args_dict_main['override_settings_with_file']
1109
+ custom_settings_file = args_dict_main['custom_settings_file']
1110
+ args_dict = pack_args(args_dict_main)
1111
+ anim_args_dict = pack_anim_args(args_dict_main)
1112
+ video_args_dict = pack_video_args(args_dict_main)
1113
+ parseq_args_dict = pack_parseq_args(args_dict_main)
1114
+ loop_args_dict = pack_loop_args(args_dict_main)
1115
+ controlnet_args_dict = pack_controlnet_args(args_dict_main)
1116
+
1117
+ import json
1118
+
1119
+ root = SimpleNamespace(**Root())
1120
+ root.p = args_dict_main['p']
1121
+ p = root.p
1122
+ root.animation_prompts = json.loads(args_dict_main['animation_prompts'])
1123
+ positive_prompts = args_dict_main['animation_prompts_positive']
1124
+ negative_prompts = args_dict_main['animation_prompts_negative']
1125
+ # remove --neg from negative_prompts if received by mistake
1126
+ negative_prompts = negative_prompts.replace('--neg', '')
1127
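+ # prepend the shared positive prompt and append the shared negative prompt to every keyframed prompt;
+ # '--neg' is inserted only if the keyframe prompt doesn't already contain its own '--neg' part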
+ for key in root.animation_prompts:
1128
+ animationPromptCurr = root.animation_prompts[key]
1129
+ root.animation_prompts[key] = f"{positive_prompts} {animationPromptCurr} {'' if '--neg' in animationPromptCurr else '--neg'} {negative_prompts}"
1130
+ from deforum_helpers.settings import load_args
1131
+
1132
+ if override_settings_with_file:
1133
+ load_args(args_dict, anim_args_dict, parseq_args_dict, loop_args_dict, controlnet_args_dict, custom_settings_file, root)
1134
+
1135
+ if not os.path.exists(root.models_path):
1136
+ os.mkdir(root.models_path)
1137
+
1138
+ args = SimpleNamespace(**args_dict)
1139
+ anim_args = SimpleNamespace(**anim_args_dict)
1140
+ video_args = SimpleNamespace(**video_args_dict)
1141
+ parseq_args = SimpleNamespace(**parseq_args_dict)
1142
+ loop_args = SimpleNamespace(**loop_args_dict)
1143
+ controlnet_args = SimpleNamespace(**controlnet_args_dict)
1144
+
1145
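+ # snap width/height down to the nearest multiple of 64 before handing them to the webui processing object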
+ p.width, p.height = map(lambda x: x - x % 64, (args.W, args.H))
1146
+ p.steps = args.steps
1147
+ p.seed = args.seed
1148
+ p.sampler_name = args.sampler
1149
+ p.batch_size = args.n_batch
1150
+ p.tiling = args.tiling
1151
+ p.restore_faces = args.restore_faces
1152
+ p.seed_enable_extras = args.seed_enable_extras
1153
+ p.subseed = args.subseed
1154
+ p.subseed_strength = args.subseed_strength
1155
+ p.seed_resize_from_w = args.seed_resize_from_w
1156
+ p.seed_resize_from_h = args.seed_resize_from_h
1157
+ p.fill = args.fill
1158
+ p.ddim_eta = args.ddim_eta
1159
+
1160
+ # TODO: Handle batch name dynamically?
1161
+ current_arg_list = [args, anim_args, video_args, parseq_args]
1162
+ args.outdir = os.path.join(p.outpath_samples, args.batch_name)
1163
+ root.outpath_samples = args.outdir
1164
+ args.outdir = os.path.join(os.getcwd(), args.outdir)
1165
+ if not os.path.exists(args.outdir):
1166
+ os.makedirs(args.outdir)
1167
+
1168
+ args.seed = get_fixed_seed(args.seed)
1169
+
1170
+ args.timestring = time.strftime('%Y%m%d%H%M%S')
1171
+ args.strength = max(0.0, min(1.0, args.strength))
1172
+
1173
+ if not args.use_init:
1174
+ args.init_image = None
1175
+
1176
+ if anim_args.animation_mode == 'None':
1177
+ anim_args.max_frames = 1
1178
+ elif anim_args.animation_mode == 'Video Input':
1179
+ args.use_init = True
1180
+
1181
+ return root, args, anim_args, video_args, parseq_args, loop_args, controlnet_args
1182
+
1183
+ def print_args(args):
1184
+ print("ARGS: \n")
1185
+ for key, value in args.__dict__.items():
1186
+ print(f"{key}: {value}")
1187
+
1188
+ # Local gradio-to-frame-interpolation function. *Needs* to stay here since we do Root() and use gradio elements directly, to be changed in the future
1189
+ def upload_vid_to_interpolate(file, engine, x_am, sl_enabled, sl_am, keep_imgs, f_location, f_crf, f_preset, in_vid_fps):
1190
+ # print msg and do nothing if vid not uploaded or interp_x not provided
1191
+ if not file or engine == 'None':
1192
+ return print("Please upload a video and set a proper value for 'Interp X'. Can't interpolate x0 times :)")
1193
+
1194
+ root_params = Root()
1195
+ f_models_path = root_params['models_path']
1196
+
1197
+ process_interp_vid_upload_logic(file, engine, x_am, sl_enabled, sl_am, keep_imgs, f_location, f_crf, f_preset, in_vid_fps, f_models_path, file.orig_name)
1198
+
1199
+ # Local gradio-to-upscalers function. *Needs* to stay here since we do Root() and use gradio elements directly, to be changed in the future
1200
+ def upload_vid_to_upscale(vid_to_upscale_chosen_file, selected_tab, upscaling_resize, upscaling_resize_w, upscaling_resize_h, upscaling_crop, extras_upscaler_1, extras_upscaler_2, extras_upscaler_2_visibility, upscale_keep_imgs, ffmpeg_location, ffmpeg_crf, ffmpeg_preset):
1201
+ # print msg and do nothing if vid not uploaded
1202
+ if not vid_to_upscale_chosen_file:
1203
+ return print("Please upload a video :)")
1204
+
1205
+ process_upscale_vid_upload_logic(vid_to_upscale_chosen_file, selected_tab, upscaling_resize, upscaling_resize_w, upscaling_resize_h, upscaling_crop, extras_upscaler_1, extras_upscaler_2, extras_upscaler_2_visibility, vid_to_upscale_chosen_file.orig_name, upscale_keep_imgs, ffmpeg_location, ffmpeg_crf, ffmpeg_preset)
1206
+
1207
+ def ncnn_upload_vid_to_upscale(vid_path, in_vid_fps, in_vid_res, out_vid_res, upscale_model, upscale_factor, keep_imgs, f_location, f_crf, f_preset):
1208
+ if vid_path is None:
1209
+ print("Please upload a video :)")
1210
+ return
1211
+ root_params = Root()
1212
+ f_models_path = root_params['models_path']
1213
+ current_user = root_params['current_user_os']
1214
+ process_ncnn_upscale_vid_upload_logic(vid_path, in_vid_fps, in_vid_res, out_vid_res, f_models_path, upscale_model, upscale_factor, keep_imgs, f_location, f_crf, f_preset, current_user)
extensions/deforum/scripts/deforum_helpers/blank_frame_reroll.py ADDED
@@ -0,0 +1,24 @@
1
+ from .generate import generate
2
+ #WebUI
3
+ from modules.shared import opts, cmd_opts, state
4
+
5
+ def blank_frame_reroll(image, args, root, frame_idx):
6
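+ # maximum number of +1-seed reroll attempts before giving up and interrupting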
+ patience = 10
7
+ print("Blank frame detected! If you don't have the NSFW filter enabled, this may be due to a glitch!")
8
+ if args.reroll_blank_frames == 'reroll':
9
+ while not image.getbbox():
10
+ print("Rerolling with +1 seed...")
11
+ args.seed += 1
12
+ image = generate(args, root, frame_idx)
13
+ patience -= 1
14
+ if patience == 0:
15
+ print("Rerolling with +1 seed failed for 10 iterations! Try setting webui's precision to 'full', and if that fails, please report this to the devs! Interrupting...")
16
+ state.interrupted = True
17
+ state.current_image = image
18
+ return None
19
+ elif args.reroll_blank_frames == 'interrupt':
20
+ print("Interrupting to save your eyes...")
21
+ state.interrupted = True
22
+ state.current_image = image
23
+ return None
24
+ return image
extensions/deforum/scripts/deforum_helpers/colors.py ADDED
@@ -0,0 +1,16 @@
1
+ from skimage.exposure import match_histograms
2
+ import cv2
3
+
4
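+ # histogram-match the current frame to the reference sample, either directly in RGB or after converting to HSV/LAB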
+ def maintain_colors(prev_img, color_match_sample, mode):
5
+ if mode == 'Match Frame 0 RGB':
6
+ return match_histograms(prev_img, color_match_sample, multichannel=True)
7
+ elif mode == 'Match Frame 0 HSV':
8
+ prev_img_hsv = cv2.cvtColor(prev_img, cv2.COLOR_RGB2HSV)
9
+ color_match_hsv = cv2.cvtColor(color_match_sample, cv2.COLOR_RGB2HSV)
10
+ matched_hsv = match_histograms(prev_img_hsv, color_match_hsv, multichannel=True)
11
+ return cv2.cvtColor(matched_hsv, cv2.COLOR_HSV2RGB)
12
+ else: # Match Frame 0 LAB
13
+ prev_img_lab = cv2.cvtColor(prev_img, cv2.COLOR_RGB2LAB)
14
+ color_match_lab = cv2.cvtColor(color_match_sample, cv2.COLOR_RGB2LAB)
15
+ matched_lab = match_histograms(prev_img_lab, color_match_lab, multichannel=True)
16
+ return cv2.cvtColor(matched_lab, cv2.COLOR_LAB2RGB)
extensions/deforum/scripts/deforum_helpers/composable_masks.py ADDED
@@ -0,0 +1,198 @@
1
+ # At the moment there are three types of masks: mask from variable, file mask and word mask
2
+ # Variable masks include init_mask for the predefined whole-video mask, frame_mask from video-masking system
3
+ # and human_mask for a model which better segments people in the background video
4
+ # They are put in {}-brackets
5
+ # Word masks are framed with <>-brackets, like: <cat>, <anime girl>
6
+ # File masks are put in []-brackets
7
+ # Empty strings are counted as the whole frame
8
+ # We want to put them all into a sequence of boolean operations
9
+
10
+ # Example:
11
+ # \ <armor>
12
+ # (({human_mask} & [mask1.png]) ^ <apple>)
13
+
14
+ # Writing the parser for the boolean sequence
15
+ # using regex and PIL operations
16
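+ # Illustrative walk-through for "(({human_mask} & [mask1.png]) ^ <apple>)" (names taken from the example above):
+ # inner parentheses are resolved first, file/word masks are loaded into val_masks under numeric keys,
+ # and the boolean operators are then reduced pass by pass until a single {var} reference remains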
+ import re
17
+ from .load_images import get_mask_from_file, check_mask_for_errors, blank_if_none
18
+ from .word_masking import get_word_mask
19
+ from torch import Tensor
20
+ import PIL
21
+ from PIL import Image, ImageChops
22
+
23
+ # val_masks: name, PIL Image mask
24
+ # Returns an image in mode '1' (needed for bool ops), convert to 'L' in the sender function
25
+ def compose_mask(root, args, mask_seq, val_masks, frame_image, inner_idx:int = 0):
26
+ # compose_mask works recursively: resolve the inner brackets first, apply the boolean ops, then go back up the stack
27
+
28
+ # Step 1:
29
+ # recursive parenthesis pass
30
+ # regex alone is not powerful enough here
31
+
32
+ seq = ""
33
+ inner_seq = ""
34
+ parentheses_counter = 0
35
+
36
+ for c in mask_seq:
37
+ if c == ')':
38
+ parentheses_counter = parentheses_counter - 1
39
+ if parentheses_counter > 0:
40
+ inner_seq += c
41
+ if c == '(':
42
+ parentheses_counter = parentheses_counter + 1
43
+ if parentheses_counter == 0:
44
+ if len(inner_seq) > 0:
45
+ inner_idx += 1
46
+ seq += compose_mask(root, args, inner_seq, val_masks, frame_image, inner_idx)
47
+ inner_seq = ""
48
+ else:
49
+ seq += c
50
+
51
+ if parentheses_counter != 0:
52
+ raise Exception(f'Mismatched parentheses in {mask_seq}!')
53
+
54
+ mask_seq = seq
55
+
56
+ # Step 2:
57
+ # Load the word masks and file masks as vars
58
+
59
+ # File masks
60
+ pattern = r'\[(?P<inner>[\S\s]*?)\]'
61
+
62
+ def parse(match_object):
63
+ nonlocal inner_idx
64
+ inner_idx += 1
65
+ content = match_object.groupdict()['inner']
66
+ val_masks[str(inner_idx)] = get_mask_from_file(content, args).convert('1') # TODO: add caching
67
+ return f"{{{inner_idx}}}"
68
+
69
+ mask_seq = re.sub(pattern, parse, mask_seq)
70
+
71
+ # Word masks
72
+ pattern = r'<(?P<inner>[\S\s]*?)>'
73
+
74
+ def parse(match_object):
75
+ nonlocal inner_idx
76
+ inner_idx += 1
77
+ content = match_object.groupdict()['inner']
78
+ val_masks[str(inner_idx)] = get_word_mask(root, frame_image, content).convert('1')
79
+ return f"{{{inner_idx}}}"
80
+
81
+ mask_seq = re.sub(pattern, parse, mask_seq)
82
+
83
+ # Now that all inner parenthesis are eliminated we're left with a linear string
84
+
85
+ # Step 3:
86
+ # Boolean operations with masks
87
+ # Operators: invert !, and &, or |, xor ^, difference \
88
+
89
+ # Invert vars with '!'
90
+ pattern = r'![\S\s]*{(?P<inner>[\S\s]*?)}'
91
+ def parse(match_object):
92
+ nonlocal inner_idx
93
+ inner_idx += 1
94
+ content = match_object.groupdict()['inner']
95
+ savename = content
96
+ if content in root.mask_preset_names:
97
+ inner_idx += 1
98
+ savename = str(inner_idx)
99
+ val_masks[savename] = ImageChops.invert(val_masks[content])
100
+ return f"{{{savename}}}"
101
+
102
+ mask_seq = re.sub(pattern, parse, mask_seq)
103
+
104
+ # Multiply neighbouring vars with '&'
105
+ # Wait for replacements stall (like in Markov chains)
106
+ while True:
107
+ pattern = r'{(?P<inner1>[\S\s]*?)}[\s]*&[\s]*{(?P<inner2>[\S\s]*?)}'
108
+ def parse(match_object):
109
+ nonlocal inner_idx
110
+ inner_idx += 1
111
+ content = match_object.groupdict()['inner1']
112
+ content_second = match_object.groupdict()['inner2']
113
+ savename = content
114
+ if content in root.mask_preset_names:
115
+ inner_idx += 1
116
+ savename = str(inner_idx)
117
+ val_masks[savename] = ImageChops.logical_and(val_masks[content], val_masks[content_second])
118
+ return f"{{{savename}}}"
119
+
120
+ prev_mask_seq = mask_seq
121
+ mask_seq = re.sub(pattern, parse, mask_seq)
122
+ if mask_seq is prev_mask_seq:
123
+ break
124
+
125
+ # Add neighbouring vars with '|'
126
+ while True:
127
+ pattern = r'{(?P<inner1>[\S\s]*?)}[\s]*?\|[\s]*?{(?P<inner2>[\S\s]*?)}'
128
+ def parse(match_object):
129
+ nonlocal inner_idx
130
+ inner_idx += 1
131
+ content = match_object.groupdict()['inner1']
132
+ content_second = match_object.groupdict()['inner2']
133
+ savename = content
134
+ if content in root.mask_preset_names:
135
+ inner_idx += 1
136
+ savename = str(inner_idx)
137
+ val_masks[savename] = ImageChops.logical_or(val_masks[content], val_masks[content_second])
138
+ return f"{{{savename}}}"
139
+
140
+ prev_mask_seq = mask_seq
141
+ mask_seq = re.sub(pattern, parse, mask_seq)
142
+ if mask_seq is prev_mask_seq:
143
+ break
144
+
145
+ # Mutually exclude neighbouring vars with '^'
146
+ while True:
147
+ pattern = r'{(?P<inner1>[\S\s]*?)}[\s]*\^[\s]*{(?P<inner2>[\S\s]*?)}'
148
+ def parse(match_object):
149
+ nonlocal inner_idx
150
+ inner_idx += 1
151
+ content = match_object.groupdict()['inner1']
152
+ content_second = match_object.groupdict()['inner2']
153
+ savename = content
154
+ if content in root.mask_preset_names:
155
+ inner_idx += 1
156
+ savename = str(inner_idx)
157
+ val_masks[savename] = ImageChops.logical_xor(val_masks[content], val_masks[content_second])
158
+ return f"{{{savename}}}"
159
+
160
+ prev_mask_seq = mask_seq
161
+ mask_seq = re.sub(pattern, parse, mask_seq)
162
+ if mask_seq is prev_mask_seq:
163
+ break
164
+
165
+ # Set-difference the regions with '\'
166
+ while True:
167
+ pattern = r'{(?P<inner1>[\S\s]*?)}[\s]*\\[\s]*{(?P<inner2>[\S\s]*?)}'
168
+ def parse(match_object):
169
+ content = match_object.groupdict()['inner1']
170
+ content_second = match_object.groupdict()['inner2']
171
+ savename = content
172
+ if content in root.mask_preset_names:
173
+ nonlocal inner_idx
174
+ inner_idx += 1
175
+ savename = str(inner_idx)
176
+ val_masks[savename] = ImageChops.logical_and(val_masks[content], ImageChops.invert(val_masks[content_second]))
177
+ return f"{{{savename}}}"
178
+
179
+ prev_mask_seq = mask_seq
180
+ mask_seq = re.sub(pattern, parse, mask_seq)
181
+ if mask_seq is prev_mask_seq:
182
+ break
183
+
184
+ # Step 4:
185
+ # Output
186
+ # Now we should have a single var left to return. If not, raise an error message
187
+ pattern = r'{(?P<inner>[\S\s]*?)}'
188
+ matches = re.findall(pattern, mask_seq)
189
+
190
+ if len(matches) != 1:
191
+ raise Exception(f'Wrong composable mask expression format! Broken mask sequence: {mask_seq}')
192
+
193
+ return f"{{{matches[0]}}}"
194
+
195
+ def compose_mask_with_check(root, args, mask_seq, val_masks, frame_image):
196
+ for k, v in val_masks.items():
197
+ val_masks[k] = blank_if_none(v, args.W, args.H, '1').convert('1')
198
+ return check_mask_for_errors(val_masks[compose_mask(root, args, mask_seq, val_masks, frame_image, 0)[1:-1]].convert('L'))
extensions/deforum/scripts/deforum_helpers/deforum_controlnet.py ADDED
@@ -0,0 +1,462 @@
1
+ # This helper script is responsible for ControlNet/Deforum integration
2
+ # https://github.com/Mikubill/sd-webui-controlnet — controlnet repo
3
+
4
+ import os, sys
5
+ import gradio as gr
6
+ import scripts
7
+ import modules.scripts as scrpts
8
+ from PIL import Image
9
+ import numpy as np
10
+ from modules.processing import process_images
11
+ from .rich import console
12
+ from rich.table import Table
13
+ from rich import box
14
+
15
+ has_controlnet = None
16
+
17
+ def find_controlnet():
18
+ global has_controlnet
19
+ if has_controlnet is not None:
20
+ return has_controlnet
21
+
22
+ try:
23
+ from scripts import controlnet
24
+ except Exception as e:
25
+ print(f'\033[33mFailed to import controlnet! The exact error is {e}. Deforum support for ControlNet will not be activated\033[0m')
26
+ has_controlnet = False
27
+ return False
28
+ has_controlnet = True
29
+ print(f"\033[0;32m*Deforum ControlNet support: enabled*\033[0m")
30
+ return True
31
+
32
+ # Most of the parts below are copied verbatim from controlnet.py
33
+ # TODO: come up with a cleaner way
34
+
35
+ gradio_compat = True
36
+ try:
37
+ from distutils.version import LooseVersion
38
+ from importlib_metadata import version
39
+ if LooseVersion(version("gradio")) < LooseVersion("3.10"):
40
+ gradio_compat = False
41
+ except ImportError:
42
+ pass
43
+
44
+ # svg support
45
+ svgsupport = False
46
+ try:
47
+ import io
48
+ import base64
49
+ from svglib.svglib import svg2rlg
50
+ from reportlab.graphics import renderPM
51
+ svgsupport = True
52
+ except ImportError:
53
+ pass
54
+
55
+ def ControlnetArgs():
56
+ controlnet_enabled = False
57
+ controlnet_scribble_mode = False
58
+ controlnet_rgbbgr_mode = False
59
+ controlnet_lowvram = False
60
+ controlnet_module = "none"
61
+ controlnet_model = "None"
62
+ controlnet_weight = 1.0
63
+ controlnet_guidance_strength = 1.0
64
+ blendFactorMax = "0:(0.35)"
65
+ blendFactorSlope = "0:(0.25)"
66
+ tweening_frames_schedule = "0:(20)"
67
+ color_correction_factor = "0:(0.075)"
68
+ return locals()
69
+
70
+ def setup_controlnet_ui_raw():
71
+ # Already under an accordion
72
+ from scripts import controlnet
73
+ from scripts.controlnet import update_cn_models, cn_models, cn_models_names
74
+
75
+ refresh_symbol = '\U0001f504' # 🔄
76
+ switch_values_symbol = '\U000021C5' # ⇅
77
+ model_dropdowns = []
78
+ infotext_fields = []
79
+ # Main part
80
+ class ToolButton(gr.Button, gr.components.FormComponent):
81
+ """Small button with single emoji as text, fits inside gradio forms"""
82
+
83
+ def __init__(self, **kwargs):
84
+ super().__init__(variant="tool", **kwargs)
85
+
86
+ def get_block_name(self):
87
+ return "button"
88
+
89
+ from scripts.processor import canny, midas, midas_normal, leres, hed, mlsd, openpose, pidinet, simple_scribble, fake_scribble, uniformer
90
+
91
+ preprocessor = {
92
+ "none": lambda x, *args, **kwargs: x,
93
+ "canny": canny,
94
+ "depth": midas,
95
+ "depth_leres": leres,
96
+ "hed": hed,
97
+ "mlsd": mlsd,
98
+ "normal_map": midas_normal,
99
+ "openpose": openpose,
100
+ # "openpose_hand": openpose_hand,
101
+ "pidinet": pidinet,
102
+ # "scribble": simple_scribble,
103
+ "fake_scribble": fake_scribble,
104
+ "segmentation": uniformer,
105
+ }
106
+
107
+ # Copying the main ControlNet widgets while getting rid of static elements such as the scribble pad
108
+ with gr.Row():
109
+ controlnet_enabled = gr.Checkbox(label='Enable', value=False)
110
+ controlnet_scribble_mode = gr.Checkbox(label='Scribble Mode (Invert colors)', value=False, visible=False)
111
+ controlnet_rgbbgr_mode = gr.Checkbox(label='RGB to BGR', value=False, visible=False)
112
+ controlnet_lowvram = gr.Checkbox(label='Low VRAM', value=False, visible=False)
113
+
114
+ def refresh_all_models(*inputs):
115
+ update_cn_models()
116
+
117
+ dd = inputs[0]
118
+ selected = dd if dd in cn_models else "None"
119
+ return gr.Dropdown.update(value=selected, choices=list(cn_models.keys()))
120
+
121
+ with gr.Row(visible=False) as cn_mod_row:
122
+ controlnet_module = gr.Dropdown(list(preprocessor.keys()), label=f"Preprocessor", value="none")
123
+ controlnet_model = gr.Dropdown(list(cn_models.keys()), label=f"Model", value="None")
124
+ refresh_models = ToolButton(value=refresh_symbol)
125
+ refresh_models.click(refresh_all_models, controlnet_model, controlnet_model)
126
+ # ctrls += (refresh_models, )
127
+ with gr.Row(visible=False) as cn_weight_row:
128
+ controlnet_weight = gr.Slider(label=f"Weight", value=1.0, minimum=0.0, maximum=2.0, step=.05)
129
+ controlnet_guidance_strength = gr.Slider(label="Guidance strength (T)", value=1.0, minimum=0.0, maximum=1.0, interactive=True)
130
+ # ctrls += (module, model, weight,)
131
+ # model_dropdowns.append(model)
132
+
133
+ # advanced options
134
+ controlnet_advanced = gr.Column(visible=False)
135
+ with controlnet_advanced:
136
+ controlnet_processor_res = gr.Slider(label="Annotator resolution", value=64, minimum=64, maximum=2048, interactive=False)
137
+ controlnet_threshold_a = gr.Slider(label="Threshold A", value=64, minimum=64, maximum=1024, interactive=False)
138
+ controlnet_threshold_b = gr.Slider(label="Threshold B", value=64, minimum=64, maximum=1024, interactive=False)
139
+
140
+ if gradio_compat:
141
+ controlnet_module.change(build_sliders, inputs=[controlnet_module], outputs=[controlnet_processor_res, controlnet_threshold_a, controlnet_threshold_b, controlnet_advanced])
142
+
143
+ infotext_fields.extend([
144
+ (controlnet_module, f"ControlNet Preprocessor"),
145
+ (controlnet_model, f"ControlNet Model"),
146
+ (controlnet_weight, f"ControlNet Weight"),
147
+ ])
148
+
149
+ with gr.Row(visible=False) as cn_env_row:
150
+ controlnet_resize_mode = gr.Radio(choices=["Envelope (Outer Fit)", "Scale to Fit (Inner Fit)", "Just Resize"], value="Scale to Fit (Inner Fit)", label="Resize Mode")
151
+
152
+ # Video input to be fed into ControlNet
153
+ #input_video_url = gr.Textbox(source='upload', type='numpy', tool='sketch') # TODO
154
+ controlnet_input_video_chosen_file = gr.File(label="ControlNet Video Input", interactive=True, file_count="single", file_types=["video"], elem_id="controlnet_input_video_chosen_file", visible=False)
155
+ controlnet_input_video_mask_chosen_file = gr.File(label="ControlNet Video Mask Input", interactive=True, file_count="single", file_types=["video"], elem_id="controlnet_input_video_mask_chosen_file", visible=False)
156
+
157
+ cn_hide_output_list = [controlnet_scribble_mode,controlnet_rgbbgr_mode,controlnet_lowvram,cn_mod_row,cn_weight_row,cn_env_row,controlnet_input_video_chosen_file,controlnet_input_video_mask_chosen_file]
158
+ for cn_output in cn_hide_output_list:
159
+ controlnet_enabled.change(fn=hide_ui_by_cn_status, inputs=controlnet_enabled,outputs=cn_output)
160
+
161
+ return locals()
162
+
163
+
164
+ def setup_controlnet_ui():
165
+ if not find_controlnet():
166
+ gr.HTML("""
167
+ <a target='_blank' href='https://github.com/Mikubill/sd-webui-controlnet'>ControlNet not found. Please install it :)</a>
168
+ """, elem_id='controlnet_not_found_html_msg')
169
+ return {}
170
+
171
+ return setup_controlnet_ui_raw()
172
+
173
+ def controlnet_component_names():
174
+ if not find_controlnet():
175
+ return []
176
+
177
+ controlnet_args_names = str(r'''controlnet_input_video_chosen_file, controlnet_input_video_mask_chosen_file,
178
+ controlnet_enabled, controlnet_scribble_mode, controlnet_rgbbgr_mode, controlnet_lowvram,
179
+ controlnet_module, controlnet_model,
180
+ controlnet_weight, controlnet_guidance_strength,
181
+ controlnet_processor_res,
182
+ controlnet_threshold_a, controlnet_threshold_b, controlnet_resize_mode'''
183
+ ).replace("\n", "").replace("\r", "").replace(" ", "").split(',')
184
+
185
+ return controlnet_args_names
186
+
187
+ def is_controlnet_enabled(controlnet_args):
188
+ return 'controlnet_enabled' in vars(controlnet_args) and controlnet_args.controlnet_enabled
189
+
190
+ def process_txt2img_with_controlnet(p, args, anim_args, loop_args, controlnet_args, root, frame_idx = 1):
191
+ # TODO: use init image and mask here
192
+ p.control_net_enabled = False # we don't want to cause concurrency issues
193
+ p.init_images = []
194
+ controlnet_frame_path = os.path.join(args.outdir, 'controlnet_inputframes', f"{frame_idx:05}.jpg")
195
+ controlnet_mask_frame_path = os.path.join(args.outdir, 'controlnet_maskframes', f"{frame_idx:05}.jpg")
196
+ cn_mask_np = None
197
+ cn_image_np = None
198
+
199
+ if not os.path.exists(controlnet_frame_path) and not os.path.exists(controlnet_mask_frame_path):
200
+ print(f'\033[33mNeither the base nor the masking frames for ControlNet were found. Using the regular pipeline\033[0m')
201
+ from .deforum_controlnet_hardcode import restore_networks
202
+ unet = p.sd_model.model.diffusion_model
203
+ restore_networks(unet)
204
+ return process_images(p)
205
+
206
+ if os.path.exists(controlnet_frame_path):
207
+ cn_image_np = Image.open(controlnet_frame_path).convert("RGB")
208
+
209
+ if os.path.exists(controlnet_mask_frame_path):
210
+ cn_mask_np = Image.open(controlnet_mask_frame_path).convert("RGB")
211
+
212
+ cn_args = {
213
+ "enabled": True,
214
+ "module": controlnet_args.controlnet_module,
215
+ "model": controlnet_args.controlnet_model,
216
+ "weight": controlnet_args.controlnet_weight,
217
+ "input_image": {'image': cn_image_np, 'mask': cn_mask_np},
218
+ "scribble_mode": controlnet_args.controlnet_scribble_mode,
219
+ "resize_mode": controlnet_args.controlnet_resize_mode,
220
+ "rgbbgr_mode": controlnet_args.controlnet_rgbbgr_mode,
221
+ "lowvram": controlnet_args.controlnet_lowvram,
222
+ "processor_res": controlnet_args.controlnet_processor_res,
223
+ "threshold_a": controlnet_args.controlnet_threshold_a,
224
+ "threshold_b": controlnet_args.controlnet_threshold_b,
225
+ "guidance_strength": controlnet_args.controlnet_guidance_strength,"guidance_strength": controlnet_args.controlnet_guidance_strength,
226
+ }
227
+
228
+ from .deforum_controlnet_hardcode import process
229
+ p.script_args = (
230
+ cn_args["enabled"],
231
+ cn_args["module"],
232
+ cn_args["model"],
233
+ cn_args["weight"],
234
+ cn_args["input_image"],
235
+ cn_args["scribble_mode"],
236
+ cn_args["resize_mode"],
237
+ cn_args["rgbbgr_mode"],
238
+ cn_args["lowvram"],
239
+ cn_args["processor_res"],
240
+ cn_args["threshold_a"],
241
+ cn_args["threshold_b"],
242
+ cn_args["guidance_strength"],
243
+ )
244
+
245
+ table = Table(title="ControlNet params",padding=0, box=box.ROUNDED)
246
+
247
+ field_names = []
248
+ field_names += ["module", "model", "weight", "guidance", "scribble", "resize", "rgb->bgr", "proc res", "thr a", "thr b"]
249
+ for field_name in field_names:
250
+ table.add_column(field_name, justify="center")
251
+
252
+ rows = []
253
+ rows += [cn_args["module"], cn_args["model"], cn_args["weight"], cn_args["guidance_strength"], cn_args["scribble_mode"], cn_args["resize_mode"], cn_args["rgbbgr_mode"], cn_args["processor_res"], cn_args["threshold_a"], cn_args["threshold_b"]]
254
+ rows = [str(x) for x in rows]
255
+
256
+ table.add_row(*rows)
257
+
258
+ console.print(table)
259
+
260
+ processed = process(p, *(p.script_args))
261
+
262
+ if processed is None: # the script just swaps the pipeline, so failing is OK for the first time
263
+ processed = process_images(p)
264
+
265
+ if processed is None: # now it's definitely not OK
266
+ raise Exception("\033[31mFailed to process a frame with ControlNet enabled!\033[0m")
267
+
268
+ p.close()
269
+
270
+ return processed
271
+
272
+ def process_img2img_with_controlnet(p, args, anim_args, loop_args, controlnet_args, root, frame_idx = 0):
273
+ p.control_net_enabled = False # we don't want to cause concurrency issues
274
+ controlnet_frame_path = os.path.join(args.outdir, 'controlnet_inputframes', f"{frame_idx:05}.jpg")
275
+ controlnet_mask_frame_path = os.path.join(args.outdir, 'controlnet_maskframes', f"{frame_idx:05}.jpg")
276
+
277
+ print(f'Reading ControlNet base frame {frame_idx} at {controlnet_frame_path}')
278
+ print(f'Reading ControlNet mask frame {frame_idx} at {controlnet_mask_frame_path}')
279
+
280
+ cn_mask_np = None
281
+ cn_image_np = None
282
+
283
+ if not os.path.exists(controlnet_frame_path) and not os.path.exists(controlnet_mask_frame_path):
284
+ print(f'\033[33mNeither the base nor the masking frames for ControlNet were found. Using the regular pipeline\033[0m')
285
+ return process_images(p)
286
+
287
+ if os.path.exists(controlnet_frame_path):
288
+ cn_image_np = np.array(Image.open(controlnet_frame_path).convert("RGB")).astype('uint8')
289
+
290
+ if os.path.exists(controlnet_mask_frame_path):
291
+ cn_mask_np = np.array(Image.open(controlnet_mask_frame_path).convert("RGB")).astype('uint8')
292
+
293
+ cn_args = {
294
+ "enabled": True,
295
+ "module": controlnet_args.controlnet_module,
296
+ "model": controlnet_args.controlnet_model,
297
+ "weight": controlnet_args.controlnet_weight,
298
+ "input_image": {'image': cn_image_np, 'mask': cn_mask_np},
299
+ "scribble_mode": controlnet_args.controlnet_scribble_mode,
300
+ "resize_mode": controlnet_args.controlnet_resize_mode,
301
+ "rgbbgr_mode": controlnet_args.controlnet_rgbbgr_mode,
302
+ "lowvram": controlnet_args.controlnet_lowvram,
303
+ "processor_res": controlnet_args.controlnet_processor_res,
304
+ "threshold_a": controlnet_args.controlnet_threshold_a,
305
+ "threshold_b": controlnet_args.controlnet_threshold_b,
306
+ "guidance_strength": controlnet_args.controlnet_guidance_strength,
307
+ }
308
+
309
+ from .deforum_controlnet_hardcode import process
310
+ p.script_args = (
311
+ cn_args["enabled"],
312
+ cn_args["module"],
313
+ cn_args["model"],
314
+ cn_args["weight"],
315
+ cn_args["input_image"],
316
+ cn_args["scribble_mode"],
317
+ cn_args["resize_mode"],
318
+ cn_args["rgbbgr_mode"],
319
+ cn_args["lowvram"],
320
+ cn_args["processor_res"],
321
+ cn_args["threshold_a"],
322
+ cn_args["threshold_b"],
323
+ cn_args["guidance_strength"],
324
+ )
325
+
326
+ table = Table(title="ControlNet params",padding=0, box=box.ROUNDED)
327
+
328
+ field_names = []
329
+ field_names += ["module", "model", "weight", "guidance", "scribble", "resize", "rgb->bgr", "proc res", "thr a", "thr b"]
330
+ for field_name in field_names:
331
+ table.add_column(field_name, justify="center")
332
+
333
+ rows = []
334
+ rows += [cn_args["module"], cn_args["model"], cn_args["weight"], cn_args["guidance_strength"], cn_args["scribble_mode"], cn_args["resize_mode"], cn_args["rgbbgr_mode"], cn_args["processor_res"], cn_args["threshold_a"], cn_args["threshold_b"]]
335
+ rows = [str(x) for x in rows]
336
+
337
+ table.add_row(*rows)
338
+
339
+ console.print(table)
340
+
341
+ processed = process(p, *(p.script_args))
342
+
343
+ if processed is None: # the script just swaps the pipeline, so failing is OK for the first time
344
+ processed = process_images(p)
345
+
346
+ if processed is None: # now it's definitely not OK
347
+ raise Exception("\033[31mFailed to process a frame with ControlNet enabled!\033[0m")
348
+
349
+ p.close()
350
+
351
+ return processed
352
+
353
+ import pathlib
354
+ from .video_audio_utilities import vid2frames
355
+
356
+ def unpack_controlnet_vids(args, anim_args, video_args, parseq_args, loop_args, controlnet_args, animation_prompts, root):
357
+ if controlnet_args.controlnet_input_video_chosen_file is not None and len(controlnet_args.controlnet_input_video_chosen_file.name) > 0:
358
+ print(f'Unpacking ControlNet base video')
359
+ # create a folder for the video input frames to live in
360
+ mask_in_frame_path = os.path.join(args.outdir, 'controlnet_inputframes')
361
+ os.makedirs(mask_in_frame_path, exist_ok=True)
362
+
363
+ # save the video frames from mask video
364
+ print(f"Exporting Video Frames (1 every {anim_args.extract_nth_frame}) frames to {mask_in_frame_path}...")
365
+ vid2frames(video_path=controlnet_args.controlnet_input_video_chosen_file.name, video_in_frame_path=mask_in_frame_path, n=anim_args.extract_nth_frame, overwrite=anim_args.overwrite_extracted_frames, extract_from_frame=anim_args.extract_from_frame, extract_to_frame=anim_args.extract_to_frame, numeric_files_output=True)
366
+
367
+ print(f"Loading {anim_args.max_frames} input frames from {mask_in_frame_path} and saving video frames to {args.outdir}")
368
+ print(f'ControlNet base video unpacked!')
369
+
370
+ if controlnet_args.controlnet_input_video_mask_chosen_file is not None and len(controlnet_args.controlnet_input_video_mask_chosen_file.name) > 0:
371
+ print(f'Unpacking ControlNet video mask')
372
+ # create a folder for the video input frames to live in
373
+ mask_in_frame_path = os.path.join(args.outdir, 'controlnet_maskframes')
374
+ os.makedirs(mask_in_frame_path, exist_ok=True)
375
+
376
+ # save the video frames from mask video
377
+ print(f"Exporting Video Frames (1 every {anim_args.extract_nth_frame}) frames to {mask_in_frame_path}...")
378
+ vid2frames(video_path=controlnet_args.controlnet_input_video_mask_chosen_file.name, video_in_frame_path=mask_in_frame_path, n=anim_args.extract_nth_frame, overwrite=anim_args.overwrite_extracted_frames, extract_from_frame=anim_args.extract_from_frame, extract_to_frame=anim_args.extract_to_frame, numeric_files_output=True)
379
+
380
+ print(f"Loading {anim_args.max_frames} input frames from {mask_in_frame_path} and saving video frames to {args.outdir}")
381
+ print(f'ControlNet video mask unpacked!')
382
+
383
+ def hide_ui_by_cn_status(choice):
384
+ return gr.update(visible=True) if choice else gr.update(visible=False)
385
+
386
+ def build_sliders(cn_model):
387
+ if cn_model == "canny":
388
+ return [
389
+ gr.update(label="Annotator resolution", value=512, minimum=64, maximum=2048, step=1, interactive=True),
390
+ gr.update(label="Canny low threshold", minimum=1, maximum=255, value=100, step=1, interactive=True),
391
+ gr.update(label="Canny high threshold", minimum=1, maximum=255, value=200, step=1, interactive=True),
392
+ gr.update(visible=True)
393
+ ]
394
+ elif cn_model == "mlsd": #Hough
395
+ return [
396
+ gr.update(label="Hough Resolution", minimum=64, maximum=2048, value=512, step=1, interactive=True),
397
+ gr.update(label="Hough value threshold (MLSD)", minimum=0.01, maximum=2.0, value=0.1, step=0.01, interactive=True),
398
+ gr.update(label="Hough distance threshold (MLSD)", minimum=0.01, maximum=20.0, value=0.1, step=0.01, interactive=True),
399
+ gr.update(visible=True)
400
+ ]
401
+ elif cn_model in ["hed", "fake_scribble"]:
402
+ return [
403
+ gr.update(label="HED Resolution", minimum=64, maximum=2048, value=512, step=1, interactive=True),
404
+ gr.update(label="Threshold A", value=64, minimum=64, maximum=1024, interactive=False),
405
+ gr.update(label="Threshold B", value=64, minimum=64, maximum=1024, interactive=False),
406
+ gr.update(visible=True)
407
+ ]
408
+ elif cn_model in ["openpose", "openpose_hand", "segmentation"]:
409
+ return [
410
+ gr.update(label="Annotator Resolution", minimum=64, maximum=2048, value=512, step=1, interactive=True),
411
+ gr.update(label="Threshold A", value=64, minimum=64, maximum=1024, interactive=False),
412
+ gr.update(label="Threshold B", value=64, minimum=64, maximum=1024, interactive=False),
413
+ gr.update(visible=True)
414
+ ]
415
+ elif cn_model == "depth":
416
+ return [
417
+ gr.update(label="Midas Resolution", minimum=64, maximum=2048, value=384, step=1, interactive=True),
418
+ gr.update(label="Threshold A", value=64, minimum=64, maximum=1024, interactive=False),
419
+ gr.update(label="Threshold B", value=64, minimum=64, maximum=1024, interactive=False),
420
+ gr.update(visible=True)
421
+ ]
422
+ elif cn_model == "depth_leres":
423
+ return [
424
+ gr.update(label="LeReS Resolution", minimum=64, maximum=2048, value=512, step=1, interactive=True),
425
+ gr.update(label="Remove Near %", value=0, minimum=0, maximum=100, step=0.1, interactive=True),
426
+ gr.update(label="Remove Background %", value=0, minimum=0, maximum=100, step=0.1, interactive=True),
427
+ gr.update(visible=True)
428
+ ]
429
+ elif cn_model == "normal_map":
430
+ return [
431
+ gr.update(label="Normal Resolution", minimum=64, maximum=2048, value=512, step=1, interactive=True),
432
+ gr.update(label="Normal background threshold", minimum=0.0, maximum=1.0, value=0.4, step=0.01, interactive=True),
433
+ gr.update(label="Threshold B", value=64, minimum=64, maximum=1024, interactive=False),
434
+ gr.update(visible=True)
435
+ ]
436
+ elif cn_model == "none":
437
+ return [
438
+ gr.update(label="Normal Resolution", value=64, minimum=64, maximum=2048, interactive=False),
439
+ gr.update(label="Threshold A", value=64, minimum=64, maximum=1024, interactive=False),
440
+ gr.update(label="Threshold B", value=64, minimum=64, maximum=1024, interactive=False),
441
+ gr.update(visible=False)
442
+ ]
443
+ else:
444
+ return [
445
+ gr.update(label="Annotator resolution", value=512, minimum=64, maximum=2048, step=1, interactive=True),
446
+ gr.update(label="Threshold A", value=64, minimum=64, maximum=1024, interactive=False),
447
+ gr.update(label="Threshold B", value=64, minimum=64, maximum=1024, interactive=False),
448
+ gr.update(visible=True)
449
+ ]
450
+
451
+ # def svgPreprocess(inputs):
452
+ # if (inputs):
453
+ # if (inputs['image'].startswith("data:image/svg+xml;base64,") and svgsupport):
454
+ # svg_data = base64.b64decode(inputs['image'].replace('data:image/svg+xml;base64,',''))
455
+ # drawing = svg2rlg(io.BytesIO(svg_data))
456
+ # png_data = renderPM.drawToString(drawing, fmt='PNG')
457
+ # encoded_string = base64.b64encode(png_data)
458
+ # base64_str = str(encoded_string, "utf-8")
459
+ # base64_str = "data:image/png;base64,"+ base64_str
460
+ # inputs['image'] = base64_str
461
+ # return input_image.orgpreprocess(inputs)
462
+ # return None
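A hedged sketch (not the extension's own code) of why the positional order of p.script_args above matters: it has to line up with the unpacking at the top of deforum_controlnet_hardcode.process(). Building the tuple from a single key list keeps both call sites in sync; the dummy values below are placeholders.

    from typing import Any, Dict, Tuple

    CN_ARG_ORDER = (
        "enabled", "module", "model", "weight", "input_image", "scribble_mode",
        "resize_mode", "rgbbgr_mode", "lowvram", "processor_res",
        "threshold_a", "threshold_b", "guidance_strength",
    )

    def pack_script_args(cn_args: Dict[str, Any]) -> Tuple[Any, ...]:
        # Flatten the ControlNet arg dict into the positional tuple process() unpacks.
        return tuple(cn_args[k] for k in CN_ARG_ORDER)

    dummy_args = {**{k: None for k in CN_ARG_ORDER}, "enabled": True, "weight": 1.0}
    assert len(pack_script_args(dummy_args)) == 13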
extensions/deforum/scripts/deforum_helpers/deforum_controlnet_hardcode.py ADDED
@@ -0,0 +1,193 @@
1
+ # TODO HACK FIXME HARDCODE — as using the scripts doesn't seem to work for some reason
2
+ deforum_latest_network = None
3
+ deforum_latest_params = (None, 'placeholder to trigger the model loading')
4
+ deforum_input_image = None
5
+ from scripts.processor import unload_hed, unload_mlsd, unload_midas, unload_leres, unload_pidinet, unload_openpose, unload_uniformer, HWC3
6
+ import modules.shared as shared
7
+ import modules.devices as devices
8
+ import modules.processing as processing
9
+ from modules.processing import StableDiffusionProcessingImg2Img, StableDiffusionProcessingTxt2Img
10
+ import numpy as np
11
+ from scripts.controlnet import update_cn_models, cn_models, cn_models_names
12
+ import os
13
+ import modules.scripts as scrpts
14
+ import torch
15
+ from scripts.cldm import PlugableControlModel
16
+ from scripts.adapter import PlugableAdapter
17
+ from scripts.utils import load_state_dict
18
+ from torchvision.transforms import Resize, InterpolationMode, CenterCrop, Compose
19
+ from einops import rearrange
20
+ cn_models_dir = os.path.join(scrpts.basedir(), "models")
21
+ default_conf_adapter = os.path.join(cn_models_dir, "sketch_adapter_v14.yaml")
22
+ default_conf = os.path.join(cn_models_dir, "cldm_v15.yaml")
23
+ unloadable = {
24
+ "hed": unload_hed,
25
+ "fake_scribble": unload_hed,
26
+ "mlsd": unload_mlsd,
27
+ "depth": unload_midas,
28
+ "depth_leres": unload_leres,
29
+ "normal_map": unload_midas,
30
+ "pidinet": unload_pidinet,
31
+ "openpose": unload_openpose,
32
+ "openpose_hand": unload_openpose,
33
+ "segmentation": unload_uniformer,
34
+ }
35
+ deforum_latest_model_hash = ""
36
+
37
+ def restore_networks(unet):
38
+ global deforum_latest_network
39
+ global deforum_latest_params
40
+ if deforum_latest_network is not None:
41
+ print("restoring last networks")
42
+ deforum_input_image = None
43
+ deforum_latest_network.restore(unet)
44
+ deforum_latest_network = None
45
+
46
+ last_module = deforum_latest_params[0]
47
+ if last_module is not None:
48
+ unloadable.get(last_module, lambda:None)()
49
+
50
+ def process(p, *args):
51
+
52
+ global deforum_latest_network
53
+ global deforum_latest_params
54
+ global deforum_input_image
55
+ global deforum_latest_model_hash
56
+
57
+ unet = p.sd_model.model.diffusion_model
58
+
59
+ enabled, module, model, weight, image, scribble_mode, \
60
+ resize_mode, rgbbgr_mode, lowvram, pres, pthr_a, pthr_b, guidance_strength = args
61
+
62
+ if not enabled:
63
+ restore_networks(unet)
64
+ return
65
+
66
+ models_changed = deforum_latest_params[1] != model \
67
+ or deforum_latest_model_hash != p.sd_model.sd_model_hash or deforum_latest_network is None \
68
+ or (deforum_latest_network is not None and deforum_latest_network.lowvram != lowvram)
69
+
70
+ deforum_latest_params = (module, model)
71
+ deforum_latest_model_hash = p.sd_model.sd_model_hash
72
+ if models_changed:
73
+ restore_networks(unet)
74
+ model_path = cn_models.get(model, None)
75
+
76
+ if model_path is None:
77
+ raise RuntimeError(f"model not found: {model}")
78
+
79
+ # trim '"' at start/end
80
+ if model_path.startswith("\"") and model_path.endswith("\""):
81
+ model_path = model_path[1:-1]
82
+
83
+ if not os.path.exists(model_path):
84
+ raise ValueError(f"file not found: {model_path}")
85
+
86
+ print(f"Loading preprocessor: {module}, model: {model}")
87
+ state_dict = load_state_dict(model_path)
88
+ network_module = PlugableControlModel
89
+ network_config = shared.opts.data.get("control_net_model_config", default_conf)
90
+ if any([k.startswith("body.") for k, v in state_dict.items()]):
91
+ # adapter model
92
+ network_module = PlugableAdapter
93
+ network_config = shared.opts.data.get("control_net_model_adapter_config", default_conf_adapter)
94
+
95
+ network = network_module(
96
+ state_dict=state_dict,
97
+ config_path=network_config,
98
+ weight=weight,
99
+ lowvram=lowvram,
100
+ base_model=unet,
101
+ )
102
+ network.to(p.sd_model.device, dtype=p.sd_model.dtype)
103
+ network.hook(unet, p.sd_model)
104
+
105
+ print(f"ControlNet model {model} loaded.")
106
+ deforum_latest_network = network
107
+
108
+ if image is not None:
109
+ deforum_input_image = HWC3(image['image'])
110
+ if 'mask' in image and image['mask'] is not None and not ((image['mask'][:, :, 0]==0).all() or (image['mask'][:, :, 0]==255).all()):
111
+ print("using mask as input")
112
+ deforum_input_image = HWC3(image['mask'][:, :, 0])
113
+ scribble_mode = True
114
+ else:
115
+ # use img2img init_image as default
116
+ deforum_input_image = getattr(p, "init_images", [None])[0]
117
+ if deforum_input_image is None:
118
+ raise ValueError('controlnet is enabled but no input image is given')
119
+ deforum_input_image = HWC3(np.asarray(deforum_input_image))
120
+
121
+ if scribble_mode:
122
+ detected_map = np.zeros_like(deforum_input_image, dtype=np.uint8)
123
+ detected_map[np.min(deforum_input_image, axis=2) < 127] = 255
124
+ deforum_input_image = detected_map
125
+
126
+ from scripts.processor import canny, midas, midas_normal, leres, hed, mlsd, openpose, pidinet, simple_scribble, fake_scribble, uniformer
127
+
128
+ preprocessor = {
129
+ "none": lambda x, *args, **kwargs: x,
130
+ "canny": canny,
131
+ "depth": midas,
132
+ "depth_leres": leres,
133
+ "hed": hed,
134
+ "mlsd": mlsd,
135
+ "normal_map": midas_normal,
136
+ "openpose": openpose,
137
+ # "openpose_hand": openpose_hand,
138
+ "pidinet": pidinet,
139
+ "scribble": simple_scribble,
140
+ "fake_scribble": fake_scribble,
141
+ "segmentation": uniformer,
142
+ }
143
+
144
+ preprocessor = preprocessor[deforum_latest_params[0]]
145
+ h, w, bsz = p.height, p.width, p.batch_size
146
+ if pres > 64:
147
+ detected_map = preprocessor(deforum_input_image, res=pres, thr_a=pthr_a, thr_b=pthr_b)
148
+ else:
149
+ detected_map = preprocessor(deforum_input_image)
150
+ detected_map = HWC3(detected_map)
151
+
152
+ if module == "normal_map" or rgbbgr_mode:
153
+ control = torch.from_numpy(detected_map[:, :, ::-1].copy()).float().to(devices.get_device_for("controlnet")) / 255.0
154
+ else:
155
+ control = torch.from_numpy(detected_map.copy()).float().to(devices.get_device_for("controlnet")) / 255.0
156
+
157
+ control = rearrange(control, 'h w c -> c h w')
158
+ detected_map = rearrange(torch.from_numpy(detected_map), 'h w c -> c h w')
159
+ if resize_mode == "Scale to Fit (Inner Fit)":
160
+ transform = Compose([
161
+ Resize(h if h<w else w, interpolation=InterpolationMode.BICUBIC),
162
+ CenterCrop(size=(h, w))
163
+ ])
164
+ control = transform(control)
165
+ detected_map = transform(detected_map)
166
+ elif resize_mode == "Envelope (Outer Fit)":
167
+ transform = Compose([
168
+ Resize(h if h>w else w, interpolation=InterpolationMode.BICUBIC),
169
+ CenterCrop(size=(h, w))
170
+ ])
171
+ control = transform(control)
172
+ detected_map = transform(detected_map)
173
+ else:
174
+ control = Resize((h,w), interpolation=InterpolationMode.BICUBIC)(control)
175
+ detected_map = Resize((h,w), interpolation=InterpolationMode.BICUBIC)(detected_map)
176
+
177
+ # for log use
178
+ detected_map = rearrange(detected_map, 'c h w -> h w c').numpy().astype(np.uint8)
179
+
180
+ # control = torch.stack([control for _ in range(bsz)], dim=0)
181
+ deforum_latest_network.notify(control, weight, guidance_strength)
182
+
183
+ if shared.opts.data.get("control_net_skip_img2img_processing") and hasattr(p, "init_images"):
184
+ swap_img2img_pipeline(p)
185
+
186
+ def swap_img2img_pipeline(p: processing.StableDiffusionProcessingImg2Img):
187
+ p.__class__ = processing.StableDiffusionProcessingTxt2Img
188
+ dummy = processing.StableDiffusionProcessingTxt2Img()
189
+ for k,v in dummy.__dict__.items():
190
+ if hasattr(p, k):
191
+ continue
192
+ setattr(p, k, v)
193
+
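For intuition, a hedged toy illustration (plain classes, not the webui ones) of the class-swap pattern swap_img2img_pipeline() uses: the instance keeps its existing attributes, takes on the new class's behaviour, and any attribute the new class would normally set in __init__ is backfilled from a throwaway instance.

    class Img2ImgLike:
        def __init__(self):
            self.init_images = ["frame_00001.png"]   # made-up attribute values
            self.steps = 20

    class Txt2ImgLike:
        def __init__(self):
            self.steps = 20
            self.enable_hr = False                   # attribute only the target class has

    p = Img2ImgLike()
    p.__class__ = Txt2ImgLike                        # swap the class in place
    dummy = Txt2ImgLike()
    for k, v in dummy.__dict__.items():
        if hasattr(p, k):
            continue                                 # keep attributes p already carries
        setattr(p, k, v)                             # backfill the missing ones

    assert isinstance(p, Txt2ImgLike) and p.init_images and p.enable_hr is False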
extensions/deforum/scripts/deforum_helpers/deprecation_utils.py ADDED
@@ -0,0 +1,20 @@
1
+ # This file is used to map deprecated setting names in a dictionary
2
+ # and print a message containing the old and the new names
3
+ # if the latter is removed completely, put a warning
4
+
5
+ # as of 2023-02-05
6
+ # "histogram_matching" -> None
7
+
8
+ deprecation_map = {
9
+ "histogram_matching": None,
10
+ "flip_2d_perspective": "enable_perspective_flip"
11
+ }
12
+
13
+ def handle_deprecated_settings(settings_json):
14
+ for old_name, new_name in deprecation_map.items():
15
+ if old_name in settings_json:
16
+ if new_name is None:
17
+ print(f"WARNING: Setting '{old_name}' has been removed. It will be discarded and the default value used instead!")
18
+ else:
19
+ print(f"WARNING: Setting '{old_name}' has been renamed to '{new_name}'. The saved settings file will reflect the change")
20
+ settings_json[new_name] = settings_json.pop(old_name)
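A short hedged usage sketch of the helper above; the settings dict is made up.

    settings = {"flip_2d_perspective": True, "histogram_matching": False, "W": 512}
    handle_deprecated_settings(settings)
    # "flip_2d_perspective" is renamed in place to "enable_perspective_flip";
    # "histogram_matching" only triggers a warning here and is simply ignored downstream.
    assert "enable_perspective_flip" in settings and "flip_2d_perspective" not in settings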
extensions/deforum/scripts/deforum_helpers/depth.py ADDED
@@ -0,0 +1,166 @@
1
+ import math, os, subprocess
2
+ import cv2
3
+ import hashlib
4
+ import numpy as np
5
+ import torch
6
+ import gc
7
+ import torchvision.transforms as T
8
+ from einops import rearrange, repeat
9
+ from PIL import Image
10
+ from infer import InferenceHelper
11
+ from midas.dpt_depth import DPTDepthModel
12
+ from midas.transforms import Resize, NormalizeImage, PrepareForNet
13
+ import torchvision.transforms.functional as TF
14
+ from .general_utils import checksum
15
+
16
+ class DepthModel():
17
+ def __init__(self, device):
18
+ self.adabins_helper = None
19
+ self.depth_min = 1000
20
+ self.depth_max = -1000
21
+ self.device = device
22
+ self.midas_model = None
23
+ self.midas_transform = None
24
+
25
+ def load_adabins(self, models_path):
26
+ if not os.path.exists(os.path.join(models_path,'AdaBins_nyu.pt')):
27
+ from basicsr.utils.download_util import load_file_from_url
28
+ load_file_from_url(r"https://cloudflare-ipfs.com/ipfs/Qmd2mMnDLWePKmgfS8m6ntAg4nhV5VkUyAydYBp8cWWeB7/AdaBins_nyu.pt", models_path)
29
+ if checksum(os.path.join(models_path,'AdaBins_nyu.pt')) != "643db9785c663aca72f66739427642726b03acc6c4c1d3755a4587aa2239962746410d63722d87b49fc73581dbc98ed8e3f7e996ff7b9c0d56d0fbc98e23e41a":
30
+ raise Exception(r"Error while downloading AdaBins_nyu.pt. Please download from here: https://drive.google.com/file/d/1lvyZZbC9NLcS8a__YPcUP7rDiIpbRpoF and place in: " + models_path)
31
+ self.adabins_helper = InferenceHelper(models_path=models_path, dataset='nyu', device=self.device)
32
+
33
+ def load_midas(self, models_path, half_precision=True):
34
+ if not os.path.exists(os.path.join(models_path, 'dpt_large-midas-2f21e586.pt')):
35
+ from basicsr.utils.download_util import load_file_from_url
36
+ load_file_from_url(r"https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt", models_path)
37
+ if checksum(os.path.join(models_path,'dpt_large-midas-2f21e586.pt')) != "fcc4829e65d00eeed0a38e9001770676535d2e95c8a16965223aba094936e1316d569563552a852d471f310f83f597e8a238987a26a950d667815e08adaebc06":
38
+ raise Exception(r"Error while downloading dpt_large-midas-2f21e586.pt. Please download from here: https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt and place in: " + models_path)
39
+
40
+ self.midas_model = DPTDepthModel(
41
+ path=f"{models_path}/dpt_large-midas-2f21e586.pt",
42
+ backbone="vitl16_384",
43
+ non_negative=True,
44
+ )
45
+ normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
46
+
47
+ self.midas_transform = T.Compose([
48
+ Resize(
49
+ 384, 384,
50
+ resize_target=None,
51
+ keep_aspect_ratio=True,
52
+ ensure_multiple_of=32,
53
+ resize_method="minimal",
54
+ image_interpolation_method=cv2.INTER_CUBIC,
55
+ ),
56
+ normalization,
57
+ PrepareForNet()
58
+ ])
59
+
60
+ self.midas_model.eval()
61
+ if self.device == torch.device("cuda"):
62
+ self.midas_model = self.midas_model.to(memory_format=torch.channels_last)
63
+ if half_precision:
64
+ self.midas_model = self.midas_model.half()
65
+ self.midas_model.to(self.device)
66
+
67
+ def predict(self, prev_img_cv2, anim_args, half_precision) -> torch.Tensor:
68
+ w, h = prev_img_cv2.shape[1], prev_img_cv2.shape[0]
69
+
70
+ # predict depth with AdaBins
71
+ use_adabins = anim_args.midas_weight < 1.0 and self.adabins_helper is not None
72
+ if use_adabins:
73
+ MAX_ADABINS_AREA = 500000
74
+ MIN_ADABINS_AREA = 448*448
75
+
76
+ # resize image if too large or too small
77
+ img_pil = Image.fromarray(cv2.cvtColor(prev_img_cv2.astype(np.uint8), cv2.COLOR_RGB2BGR))
78
+ image_pil_area = w*h
79
+ resized = True
80
+ if image_pil_area > MAX_ADABINS_AREA:
81
+ scale = math.sqrt(MAX_ADABINS_AREA) / math.sqrt(image_pil_area)
82
+ depth_input = img_pil.resize((int(w*scale), int(h*scale)), Image.LANCZOS) # LANCZOS is good for downsampling
83
+ print(f" resized to {depth_input.width}x{depth_input.height}")
84
+ elif image_pil_area < MIN_ADABINS_AREA:
85
+ scale = math.sqrt(MIN_ADABINS_AREA) / math.sqrt(image_pil_area)
86
+ depth_input = img_pil.resize((int(w*scale), int(h*scale)), Image.BICUBIC)
87
+ print(f" resized to {depth_input.width}x{depth_input.height}")
88
+ else:
89
+ depth_input = img_pil
90
+ resized = False
91
+
92
+ # predict depth and resize back to original dimensions
93
+ try:
94
+ with torch.no_grad():
95
+ _, adabins_depth = self.adabins_helper.predict_pil(depth_input)
96
+ if resized:
97
+ adabins_depth = TF.resize(
98
+ torch.from_numpy(adabins_depth),
99
+ torch.Size([h, w]),
100
+ interpolation=TF.InterpolationMode.BICUBIC
101
+ )
102
+ adabins_depth = adabins_depth.cpu().numpy()
103
+ adabins_depth = adabins_depth.squeeze()
104
+ except Exception:
105
+ print(f" exception encountered, falling back to pure MiDaS")
106
+ use_adabins = False
107
+ torch.cuda.empty_cache()
108
+
109
+ if self.midas_model is not None:
110
+ # convert image from 0->255 uint8 to 0->1 float for feeding to MiDaS
111
+ img_midas = prev_img_cv2.astype(np.float32) / 255.0
112
+ img_midas_input = self.midas_transform({"image": img_midas})["image"]
113
+
114
+ # MiDaS depth estimation implementation
115
+ sample = torch.from_numpy(img_midas_input).float().to(self.device).unsqueeze(0)
116
+ if self.device == torch.device("cuda"):
117
+ sample = sample.to(memory_format=torch.channels_last)
118
+ if half_precision:
119
+ sample = sample.half()
120
+ with torch.no_grad():
121
+ midas_depth = self.midas_model.forward(sample)
122
+ midas_depth = torch.nn.functional.interpolate(
123
+ midas_depth.unsqueeze(1),
124
+ size=img_midas.shape[:2],
125
+ mode="bicubic",
126
+ align_corners=False,
127
+ ).squeeze()
128
+ midas_depth = midas_depth.cpu().numpy()
129
+ torch.cuda.empty_cache()
130
+
131
+ # MiDaS makes the near values greater, and the far values lesser. Let's reverse that and try to align with AdaBins a bit better.
132
+ midas_depth = np.subtract(50.0, midas_depth)
133
+ midas_depth = midas_depth / 19.0
134
+
135
+ # blend between MiDaS and AdaBins predictions
136
+ if use_adabins:
137
+ depth_map = midas_depth*anim_args.midas_weight + adabins_depth*(1.0-anim_args.midas_weight)
138
+ else:
139
+ depth_map = midas_depth
140
+
141
+ depth_map = np.expand_dims(depth_map, axis=0)
142
+ depth_tensor = torch.from_numpy(depth_map).squeeze().to(self.device)
143
+ else:
144
+ depth_tensor = torch.ones((h, w), device=self.device)
145
+
146
+ return depth_tensor
147
+
148
+ def save(self, filename: str, depth: torch.Tensor):
149
+ depth = depth.cpu().numpy()
150
+ if len(depth.shape) == 2:
151
+ depth = np.expand_dims(depth, axis=0)
152
+ self.depth_min = min(self.depth_min, depth.min())
153
+ self.depth_max = max(self.depth_max, depth.max())
154
+ print(f" depth min:{depth.min()} max:{depth.max()}")
155
+ denom = max(1e-8, self.depth_max - self.depth_min)
156
+ temp = rearrange((depth - self.depth_min) / denom * 255, 'c h w -> h w c')
157
+ temp = repeat(temp, 'h w 1 -> h w c', c=3)
158
+ Image.fromarray(temp.astype(np.uint8)).save(filename)
159
+
160
+ def to(self, device):
161
+ self.device = device
162
+ self.midas_model.to(device)
163
+ if self.adabins_helper is not None:
164
+ self.adabins_helper.to(device)
165
+ gc.collect()
166
+ torch.cuda.empty_cache()
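A hedged numeric sketch of the min/max scaling save() applies before writing the depth PNG; the depth values and running min/max are made up.

    import numpy as np

    depth = np.array([[2.0, 3.0], [4.0, 6.0]], dtype=np.float32)
    depth_min, depth_max = 1.0, 6.0              # running min/max across saved frames
    denom = max(1e-8, depth_max - depth_min)     # guard against a zero range
    gray = (depth - depth_min) / denom * 255
    # gray == [[ 51., 102.], [153., 255.]] -> repeated over 3 channels and saved as uint8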
extensions/deforum/scripts/deforum_helpers/frame_interpolation.py ADDED
@@ -0,0 +1,192 @@
1
+ import os
2
+ from pathlib import Path
3
+ from rife.inference_video import run_rife_new_video_infer
4
+ from .video_audio_utilities import get_quick_vid_info, vid2frames, media_file_has_audio, extract_number, ffmpeg_stitch_video
5
+ from film_interpolation.film_inference import run_film_interp_infer
6
+ from .general_utils import duplicate_pngs_from_folder, checksum
7
+
8
+ # gets 'RIFE v4.3', returns: 'RIFE43'
9
+ def extract_rife_name(string):
10
+ parts = string.split()
11
+ if len(parts) != 2 or parts[0] != "RIFE" or (parts[1][0] != "v" or not parts[1][1:].replace('.','').isdigit()):
12
+ raise ValueError("Input string should contain exactly 2 words, first word should be 'RIFE' and second word should start with 'v' followed by 2 numbers")
13
+ return "RIFE"+parts[1][1:].replace('.','')
14
+
15
+ # This function usually gets a filename, and converts it to a legal linux/windows *folder* name
16
+ def clean_folder_name(string):
17
+ illegal_chars = ["/", "\\", "<", ">", ":", "\"", "|", "?", "*", "."]
18
+ for char in illegal_chars:
19
+ string = string.replace(char, "_")
20
+ return string
21
+
22
+ def set_interp_out_fps(interp_x, slow_x_enabled, slom_x, in_vid_fps):
23
+ if interp_x == 'Disabled' or in_vid_fps in ('---', None, '', 'None'):
24
+ return '---'
25
+
26
+ # clean_interp_x = extract_number(interp_x)
27
+ # clean_slom_x = extract_number(slom_x)
28
+ fps = float(in_vid_fps) * int(interp_x)
29
+ # if slom_x != -1:
30
+ if slow_x_enabled:
31
+ fps /= int(slom_x)
32
+ return int(fps) if fps.is_integer() else fps
33
+
34
+ # get uploaded video frame count, fps, and return 3 values for the gradio UI: in fcount, in fps, out fps (using the set_interp_out_fps function above)
35
+ def gradio_f_interp_get_fps_and_fcount(vid_path, interp_x, slow_x_enabled, slom_x):
36
+ if vid_path is None:
37
+ return '---', '---', '---'
38
+ fps, fcount, resolution = get_quick_vid_info(vid_path.name)
39
+ expected_out_fps = set_interp_out_fps(interp_x, slow_x_enabled, slom_x, fps)
40
+ return (str(round(fps,2)) if fps is not None else '---', (round(fcount,2)) if fcount is not None else '---', round(expected_out_fps,2))
41
+
42
+ # handle call to interpolate an uploaded video from gradio button in args.py (the function that calls this func is named 'upload_vid_to_rife')
43
+ def process_interp_vid_upload_logic(file, engine, x_am, sl_enabled, sl_am, keep_imgs, f_location, f_crf, f_preset, in_vid_fps, f_models_path, vid_file_name):
44
+
45
+ print("got a request to *frame interpolate* an existing video.")
46
+
47
+ _, _, resolution = get_quick_vid_info(file.name)
48
+ folder_name = clean_folder_name(Path(vid_file_name).stem)
49
+ outdir_no_tmp = os.path.join(os.getcwd(), 'outputs', 'frame-interpolation', folder_name)
50
+ i = 1
51
+ while os.path.exists(outdir_no_tmp):
52
+ outdir_no_tmp = os.path.join(os.getcwd(), 'outputs', 'frame-interpolation', folder_name + '_' + str(i))
53
+ i += 1
54
+
55
+ outdir = os.path.join(outdir_no_tmp, 'tmp_input_frames')
56
+ os.makedirs(outdir, exist_ok=True)
57
+
58
+ vid2frames(video_path=file.name, video_in_frame_path=outdir, overwrite=True, extract_from_frame=0, extract_to_frame=-1, numeric_files_output=True, out_img_format='png')
59
+
60
+ # check if the uploaded vid has an audio stream. If it doesn't, set audio param to None so that ffmpeg won't try to add non-existing audio to final video.
61
+ audio_file_to_pass = None
62
+ if media_file_has_audio(file.name, f_location):
63
+ audio_file_to_pass = file.name
64
+
65
+ process_video_interpolation(frame_interpolation_engine=engine, frame_interpolation_x_amount=x_am, frame_interpolation_slow_mo_enabled = sl_enabled,frame_interpolation_slow_mo_amount=sl_am, orig_vid_fps=in_vid_fps, deforum_models_path=f_models_path, real_audio_track=audio_file_to_pass, raw_output_imgs_path=outdir, img_batch_id=None, ffmpeg_location=f_location, ffmpeg_crf=f_crf, ffmpeg_preset=f_preset, keep_interp_imgs=keep_imgs, orig_vid_name=folder_name, resolution=resolution)
66
+
67
+ # handle params before talking with the actual interpolation module (RIFE/FILM, more to be added)
68
+ def process_video_interpolation(frame_interpolation_engine, frame_interpolation_x_amount, frame_interpolation_slow_mo_enabled, frame_interpolation_slow_mo_amount, orig_vid_fps, deforum_models_path, real_audio_track, raw_output_imgs_path, img_batch_id, ffmpeg_location, ffmpeg_crf, ffmpeg_preset, keep_interp_imgs, orig_vid_name, resolution):
69
+
70
+ # set initial output vid fps
71
+ fps = float(orig_vid_fps) * frame_interpolation_x_amount
72
+
73
+ # re-calculate fps param to pass if slow_mo mode is enabled
74
+ if frame_interpolation_slow_mo_enabled:
75
+ fps = float(orig_vid_fps) * frame_interpolation_x_amount / int(frame_interpolation_slow_mo_amount)
76
+ # disable audio-adding by setting real_audio_track to None if slow-mo is enabled
77
+ if real_audio_track is not None and frame_interpolation_slow_mo_enabled:
78
+ real_audio_track = None
79
+
80
+ if frame_interpolation_engine == 'None':
81
+ return
82
+ elif frame_interpolation_engine.startswith("RIFE"):
83
+ # make sure interp_x is valid and in range
84
+ if frame_interpolation_x_amount not in range(2, 11):
85
+ raise Error("frame_interpolation_x_amount must be between 2x and 10x")
86
+
87
+ # set UHD to True if the resolution is 2K or higher
88
+ if resolution:
89
+ UHD = resolution[0] >= 2048 and resolution[1] >= 2048
90
+ else:
91
+ UHD = False
92
+ # e.g from "RIFE v2.3 to RIFE23"
93
+ actual_model_folder_name = extract_rife_name(frame_interpolation_engine)
94
+
95
+ # run actual rife interpolation and video stitching etc - the whole suite
96
+ run_rife_new_video_infer(interp_x_amount=frame_interpolation_x_amount, slow_mo_enabled = frame_interpolation_slow_mo_enabled, slow_mo_x_amount=frame_interpolation_slow_mo_amount, model=actual_model_folder_name, fps=fps, deforum_models_path=deforum_models_path, audio_track=real_audio_track, raw_output_imgs_path=raw_output_imgs_path, img_batch_id=img_batch_id, ffmpeg_location=ffmpeg_location, ffmpeg_crf=ffmpeg_crf, ffmpeg_preset=ffmpeg_preset, keep_imgs=keep_interp_imgs, orig_vid_name=orig_vid_name, UHD=UHD)
97
+ elif frame_interpolation_engine == 'FILM':
98
+ prepare_film_inference(deforum_models_path=deforum_models_path, x_am=frame_interpolation_x_amount, sl_enabled=frame_interpolation_slow_mo_enabled, sl_am=frame_interpolation_slow_mo_amount, keep_imgs=keep_interp_imgs, raw_output_imgs_path=raw_output_imgs_path, img_batch_id=img_batch_id, f_location=ffmpeg_location, f_crf=ffmpeg_crf, f_preset=ffmpeg_preset, fps=fps, audio_track=real_audio_track, orig_vid_name=orig_vid_name)
99
+ else:
100
+ print("Unknown Frame Interpolation engine chosen. Doing nothing.")
101
+ return
102
+
103
+ def prepare_film_inference(deforum_models_path, x_am, sl_enabled, sl_am, keep_imgs, raw_output_imgs_path, img_batch_id, f_location, f_crf, f_preset, fps, audio_track, orig_vid_name):
104
+ import shutil
105
+
106
+ parent_folder = os.path.dirname(raw_output_imgs_path)
107
+ grandparent_folder = os.path.dirname(parent_folder)
108
+ if orig_vid_name is not None:
109
+ interp_vid_path = os.path.join(parent_folder, str(orig_vid_name) +'_FILM_x' + str(x_am))
110
+ else:
111
+ interp_vid_path = os.path.join(raw_output_imgs_path, str(img_batch_id) +'_FILM_x' + str(x_am))
112
+
113
+ film_model_name = 'film_net_fp16.pt'
114
+ film_model_folder = os.path.join(deforum_models_path,'film_interpolation')
115
+ film_model_path = os.path.join(film_model_folder, film_model_name) # actual full path to the film .pt model file
116
+ output_interp_imgs_folder = os.path.join(raw_output_imgs_path, 'interpolated_frames_film')
117
+ # set custom name depending on if we interpolate after a run, or interpolate a video (related/unrelated to deforum, we don't know) directly from within the interpolation tab
118
+ # interpolated_path = os.path.join(args.raw_output_imgs_path, 'interpolated_frames_rife')
119
+ if orig_vid_name is not None: # interpolating a video (deforum or unrelated)
120
+ custom_interp_path = "{}_{}".format(output_interp_imgs_folder, orig_vid_name)
121
+ else: # interpolating after a deforum run:
122
+ custom_interp_path = "{}_{}".format(output_interp_imgs_folder, img_batch_id)
123
+
124
+ # interp_vid_path = os.path.join(raw_output_imgs_path, str(img_batch_id) + '_FILM_x' + str(x_am))
125
+ img_path_for_ffmpeg = os.path.join(custom_interp_path, "frame_%05d.png")
126
+
127
+ if sl_enabled:
128
+ interp_vid_path = interp_vid_path + '_slomo_x' + str(sl_am)
129
+ interp_vid_path = interp_vid_path + '.mp4'
130
+
131
+ # In this folder we temporarily keep the original frames (converted/ copy-pasted and img format depends on scenario)
132
+ # the conversion case is done to avert a problem with mixed 24-bit and 32-bit outputs from the same animation run
133
+ temp_convert_raw_png_path = os.path.join(raw_output_imgs_path, "tmp_film_folder")
134
+ total_frames = duplicate_pngs_from_folder(raw_output_imgs_path, temp_convert_raw_png_path, img_batch_id, None)
135
+ check_and_download_film_model('film_net_fp16.pt', film_model_folder) # TODO: split this part
136
+
137
+ # get number of in-between-frames to provide to FILM - mimics how RIFE works, we should get the same amount of total frames in the end
138
+ film_in_between_frames_count = calculate_frames_to_add(total_frames, x_am)
139
+ # Run actual FILM inference
140
+ run_film_interp_infer(
141
+ model_path = film_model_path,
142
+ input_folder = temp_convert_raw_png_path,
143
+ save_folder = custom_interp_path, # output folder is created in the infer part
144
+ inter_frames = film_in_between_frames_count)
145
+
146
+ add_soundtrack = 'None'
147
+ if audio_track is not None:
148
+ add_soundtrack = 'File'
149
+
150
+ print (f"*Passing interpolated frames to ffmpeg...*")
151
+ exception_raised = False
152
+ try:
153
+ ffmpeg_stitch_video(ffmpeg_location=f_location, fps=fps, outmp4_path=interp_vid_path, stitch_from_frame=0, stitch_to_frame=999999, imgs_path=img_path_for_ffmpeg, add_soundtrack=add_soundtrack, audio_path=audio_track, crf=f_crf, preset=f_preset)
154
+ except Exception as e:
155
+ exception_raised = True
156
+ print(f"An error occurred while stitching the video: {e}")
157
+
158
+ if orig_vid_name and (keep_imgs or exception_raised):
159
+ shutil.move(custom_interp_path, parent_folder)
160
+ if not keep_imgs and not exception_raised:
161
+ if fps <= 450: # keep interp frames automatically if out_vid fps is above 450
162
+ shutil.rmtree(custom_interp_path, ignore_errors=True)
163
+ # delete duplicated raw non-interpolated frames
164
+ shutil.rmtree(temp_convert_raw_png_path, ignore_errors=True)
165
+ # remove folder with raw (non-interpolated) vid input frames in case of input VID and not PNGs
166
+ if orig_vid_name:
167
+ shutil.rmtree(raw_output_imgs_path, ignore_errors=True)
168
+
169
+ def check_and_download_film_model(model_name, model_dest_folder):
170
+ from basicsr.utils.download_util import load_file_from_url
171
+ if model_name == 'film_net_fp16.pt':
172
+ model_dest_path = os.path.join(model_dest_folder, model_name)
173
+ download_url = 'https://github.com/hithereai/frame-interpolation-pytorch/releases/download/film_net_fp16.pt/film_net_fp16.pt'
174
+ film_model_hash = '0a823815b111488ac2b7dd7fe6acdd25d35a22b703e8253587764cf1ee3f8f93676d24154d9536d2ce5bc3b2f102fb36dfe0ca230dfbe289d5cd7bde5a34ec12'
175
+ else: # Unknown FILM model
176
+ raise Exception("Got a request to download an unknown FILM model. Can't proceed.")
177
+ if os.path.exists(model_dest_path):
178
+ return
179
+ try:
180
+ os.makedirs(model_dest_folder, exist_ok=True)
181
+ # download film model from url
182
+ load_file_from_url(download_url, model_dest_folder)
183
+ # verify checksum
184
+ if checksum(model_dest_path) != film_model_hash:
185
+ raise Exception(f"Error while downloading {model_name}. Please download from: {download_url}, and put in: {model_dest_folder}")
186
+ except Exception as e:
187
+ raise Exception(f"Error while downloading {model_name}. Please download from: {download_url}, and put in: {model_dest_folder}")
188
+
189
+ # get the number of in-between frames FILM should add after each picture, from the total frame count and the interp_x value
190
+ def calculate_frames_to_add(total_frames, interp_x):
191
+ frames_to_add = (total_frames * interp_x - total_frames) / (total_frames - 1)
192
+ return int(round(frames_to_add))
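A hedged worked example of the formula above: interpolating 10 source frames 3x targets 30 frames in total, i.e. 20 extra frames spread over the 9 gaps between consecutive source frames.

    total_frames, interp_x = 10, 3                       # made-up values
    frames_to_add = (total_frames * interp_x - total_frames) / (total_frames - 1)
    print(frames_to_add)                                 # 20 / 9 ≈ 2.22
    print(int(round(frames_to_add)))                     # FILM gets 2 in-between frames per gap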
extensions/deforum/scripts/deforum_helpers/general_utils.py ADDED
@@ -0,0 +1,32 @@
1
+ import hashlib
2
+ def checksum(filename, hash_factory=hashlib.blake2b, chunk_num_blocks=128):
3
+ h = hash_factory()
4
+ with open(filename,'rb') as f:
5
+ while chunk := f.read(chunk_num_blocks*h.block_size):
6
+ h.update(chunk)
7
+ return h.hexdigest()
8
+
9
+ def get_os():
10
+ import platform
11
+ return {"Windows": "Windows", "Linux": "Linux", "Darwin": "Mac"}.get(platform.system(), "Unknown")
12
+
13
+ # used in src/rife/inference_video.py and more, soon
14
+ def duplicate_pngs_from_folder(from_folder, to_folder, img_batch_id, orig_vid_name):
15
+ import os, cv2, shutil #, subprocess
16
+ # TODO: don't copy-paste at all if the input is a video (currently it copy-pastes, and if the input is a Deforum run it also converts the frames to make sure no errors arise because of 24/32-bit depth differences)
17
+ temp_convert_raw_png_path = os.path.join(from_folder, to_folder)
18
+ if not os.path.exists(temp_convert_raw_png_path):
19
+ os.makedirs(temp_convert_raw_png_path)
20
+
21
+ frames_handled = 0
22
+ for f in os.listdir(from_folder):
23
+ if ('png' in f or 'jpg' in f) and '-' not in f and '_depth_' not in f and ((img_batch_id is not None and f.startswith(img_batch_id) or img_batch_id is None)):
24
+ frames_handled +=1
25
+ original_img_path = os.path.join(from_folder, f)
26
+ if orig_vid_name is not None:
27
+ shutil.copy(original_img_path, temp_convert_raw_png_path)
28
+ else:
29
+ image = cv2.imread(original_img_path)
30
+ new_path = os.path.join(temp_convert_raw_png_path, f)
31
+ cv2.imwrite(new_path, image, [cv2.IMWRITE_PNG_COMPRESSION, 0])
32
+ return frames_handled
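A hedged usage sketch of checksum(): it streams the file in block-sized chunks and returns the hex BLAKE2b digest that depth.py and frame_interpolation.py compare against their hardcoded hashes. The path here is a placeholder.

    model_path = "models/Deforum/dpt_large-midas-2f21e586.pt"   # hypothetical location
    digest = checksum(model_path)      # 128 hex chars for the default 64-byte blake2b digest
    print(get_os(), len(digest))       # e.g. "Linux 128"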
extensions/deforum/scripts/deforum_helpers/generate.py ADDED
@@ -0,0 +1,244 @@
1
+ import numpy as np
2
+ import cv2
3
+ from PIL import Image
4
+ from .prompt import split_weighted_subprompts
5
+ from .load_images import load_img, prepare_mask, check_mask_for_errors
6
+ from .webui_sd_pipeline import get_webui_sd_pipeline
7
+ from .animation import sample_from_cv2, sample_to_cv2
8
+ from .rich import console
9
+ #Webui
10
+ import cv2
11
+ from .animation import sample_from_cv2, sample_to_cv2
12
+ from modules import processing, sd_models
13
+ from modules.shared import opts, sd_model
14
+ from modules.processing import process_images, StableDiffusionProcessingTxt2Img
15
+ from .deforum_controlnet import is_controlnet_enabled, process_txt2img_with_controlnet, process_img2img_with_controlnet
16
+
17
+ import math, json, itertools
18
+ import requests
19
+
20
+ def load_mask_latent(mask_input, shape):
21
+ # mask_input (str or PIL Image.Image): Path to the mask image or a PIL Image object
22
+ # shape (list-like len(4)): shape of the image to match, usually latent_image.shape
23
+
24
+ if isinstance(mask_input, str): # mask input is probably a file name
25
+ if mask_input.startswith('http://') or mask_input.startswith('https://'):
26
+ mask_image = Image.open(requests.get(mask_input, stream=True).raw).convert('RGBA')
27
+ else:
28
+ mask_image = Image.open(mask_input).convert('RGBA')
29
+ elif isinstance(mask_input, Image.Image):
30
+ mask_image = mask_input
31
+ else:
32
+ raise Exception("mask_input must be a PIL image or a file name")
33
+
34
+ mask_w_h = (shape[-1], shape[-2])
35
+ mask = mask_image.resize(mask_w_h, resample=Image.LANCZOS)
36
+ mask = mask.convert("L")
37
+ return mask
38
+
39
+ def isJson(myjson):
40
+ try:
41
+ json.loads(myjson)
42
+ except ValueError as e:
43
+ return False
44
+ return True
45
+
46
+ # Add a pairwise implementation here so we don't have to upgrade
47
+ # the whole Python install to 3.10 just for one function
48
+ def pairwise_repl(iterable):
49
+ a, b = itertools.tee(iterable)
50
+ next(b, None)
51
+ return zip(a, b)
52
+
53
+ def generate(args, anim_args, loop_args, controlnet_args, root, frame = 0, return_sample=False, sampler_name=None):
54
+ assert args.prompt is not None
55
+
56
+ # Setup the pipeline
57
+ p = get_webui_sd_pipeline(args, root, frame)
58
+ p.prompt, p.negative_prompt = split_weighted_subprompts(args.prompt, frame)
59
+
60
+ if not args.use_init and args.strength > 0 and args.strength_0_no_init:
61
+ print("\nNo init image, but strength > 0. Strength has been auto set to 0, since use_init is False.")
62
+ print("If you want to force strength > 0 with no init, please set strength_0_no_init to False.\n")
63
+ args.strength = 0
64
+ processed = None
65
+ mask_image = None
66
+ init_image = None
67
+ image_init0 = None
68
+
69
+ if loop_args.use_looper:
70
+ # TODO find out why we need to set this in the init tab
71
+ if args.strength == 0:
72
+ raise RuntimeError("Strength needs to be greater than 0 in Init tab and strength_0_no_init should *not* be checked")
73
+ if args.seed_behavior != "schedule":
74
+ raise RuntimeError("seed_behavior needs to be set to schedule in under 'Keyframes' tab --> 'Seed scheduling'")
75
+ if not isJson(loop_args.imagesToKeyframe):
76
+ raise RuntimeError("The images set for use with keyframe-guidance are not in a proper JSON format")
77
+ args.strength = loop_args.imageStrength
78
+ tweeningFrames = loop_args.tweeningFrameSchedule
79
+ blendFactor = .07
80
+ colorCorrectionFactor = loop_args.colorCorrectionFactor
81
+ jsonImages = json.loads(loop_args.imagesToKeyframe)
82
+ framesToImageSwapOn = list(map(int, list(jsonImages.keys())))
83
+ # find which image to show
84
+ frameToChoose = 0
85
+ for swappingFrame in framesToImageSwapOn[1:]:
86
+ frameToChoose += (frame >= int(swappingFrame))
87
+
88
+ #find which frame to do our swapping on for tweening
89
+ skipFrame = 25
90
+ for fs, fe in pairwise_repl(framesToImageSwapOn):
91
+ if fs <= frame <= fe:
92
+ skipFrame = fe - fs
93
+
94
+ if frame % skipFrame <= tweeningFrames: # number of tweening frames
95
+ blendFactor = loop_args.blendFactorMax - loop_args.blendFactorSlope*math.cos((frame % tweeningFrames) / (tweeningFrames / 2))
96
+ init_image2, _ = load_img(list(jsonImages.values())[frameToChoose],
97
+ shape=(args.W, args.H),
98
+ use_alpha_as_mask=args.use_alpha_as_mask)
99
+ image_init0 = list(jsonImages.values())[0]
100
+
101
+ else: # they passed in a single init image
102
+ image_init0 = args.init_image
103
+
104
+
105
+ available_samplers = {
106
+ 'euler a':'Euler a',
107
+ 'euler':'Euler',
108
+ 'lms':'LMS',
109
+ 'heun':'Heun',
110
+ 'dpm2':'DPM2',
111
+ 'dpm2 a':'DPM2 a',
112
+ 'dpm++ 2s a':'DPM++ 2S a',
113
+ 'dpm++ 2m':'DPM++ 2M',
114
+ 'dpm++ sde':'DPM++ SDE',
115
+ 'dpm fast':'DPM fast',
116
+ 'dpm adaptive':'DPM adaptive',
117
+ 'lms karras':'LMS Karras' ,
118
+ 'dpm2 karras':'DPM2 Karras',
119
+ 'dpm2 a karras':'DPM2 a Karras',
120
+ 'dpm++ 2s a karras':'DPM++ 2S a Karras',
121
+ 'dpm++ 2m karras':'DPM++ 2M Karras',
122
+ 'dpm++ sde karras':'DPM++ SDE Karras'
123
+ }
124
+ if sampler_name is not None:
125
+ if sampler_name in available_samplers.keys():
126
+ args.sampler = available_samplers[sampler_name]
127
+
128
+ if args.checkpoint is not None:
129
+ info = sd_models.get_closet_checkpoint_match(args.checkpoint)
130
+ if info is None:
131
+ raise RuntimeError(f"Unknown checkpoint: {args.checkpoint}")
132
+ sd_models.reload_model_weights(info=info)
133
+
134
+ if args.init_sample is not None:
135
+ # TODO: cleanup init_sample remains later
136
+ img = args.init_sample
137
+ init_image = img
138
+ image_init0 = img
139
+ if loop_args.use_looper and isJson(loop_args.imagesToKeyframe):
140
+ init_image = Image.blend(init_image, init_image2, blendFactor)
141
+ correction_colors = Image.blend(init_image, init_image2, colorCorrectionFactor)
142
+ p.color_corrections = [processing.setup_color_correction(correction_colors)]
143
+
144
+ # this is the first pass
145
+ elif loop_args.use_looper or (args.use_init and ((args.init_image != None and args.init_image != ''))):
146
+ init_image, mask_image = load_img(image_init0, # initial init image
147
+ shape=(args.W, args.H),
148
+ use_alpha_as_mask=args.use_alpha_as_mask)
149
+
150
+ else:
151
+
152
+ if anim_args.animation_mode != 'Interpolation':
153
+ print(f"Not using an init image (doing pure txt2img)")
154
+ p_txt = StableDiffusionProcessingTxt2Img(
155
+ sd_model=sd_model,
156
+ outpath_samples=root.tmp_deforum_run_duplicated_folder,
157
+ outpath_grids=root.tmp_deforum_run_duplicated_folder,
158
+ prompt=p.prompt,
159
+ styles=p.styles,
160
+ negative_prompt=p.negative_prompt,
161
+ seed=p.seed,
162
+ subseed=p.subseed,
163
+ subseed_strength=p.subseed_strength,
164
+ seed_resize_from_h=p.seed_resize_from_h,
165
+ seed_resize_from_w=p.seed_resize_from_w,
166
+ sampler_name=p.sampler_name,
167
+ batch_size=p.batch_size,
168
+ n_iter=p.n_iter,
169
+ steps=p.steps,
170
+ cfg_scale=p.cfg_scale,
171
+ width=p.width,
172
+ height=p.height,
173
+ restore_faces=p.restore_faces,
174
+ tiling=p.tiling,
175
+ enable_hr=None,
176
+ denoising_strength=None,
177
+ )
178
+ # print dynamic table to cli
179
+ print_generate_table(args, anim_args, p_txt)
180
+
181
+ if is_controlnet_enabled(controlnet_args):
182
+ processed = process_txt2img_with_controlnet(p, args, anim_args, loop_args, controlnet_args, root, frame)
183
+ else:
184
+ processed = processing.process_images(p_txt)
185
+
186
+ if processed is None:
187
+ # Mask functions
188
+ if args.use_mask:
189
+ mask = args.mask_image
190
+ #assign masking options to pipeline
191
+ if mask is not None:
192
+ p.inpainting_mask_invert = args.invert_mask
193
+ p.inpainting_fill = args.fill
194
+ p.inpaint_full_res= args.full_res_mask
195
+ p.inpaint_full_res_padding = args.full_res_mask_padding
196
+ else:
197
+ mask = None
198
+
199
+ assert not ( (mask is not None and args.use_mask and args.overlay_mask) and (args.init_sample is None and init_image is None)), "Need an init image when use_mask == True and overlay_mask == True"
200
+
201
+ p.init_images = [init_image]
202
+ p.image_mask = mask
203
+ p.image_cfg_scale = args.pix2pix_img_cfg_scale
204
+
205
+ # print dynamic table to cli
206
+ print_generate_table(args, anim_args, p)
207
+
208
+ if is_controlnet_enabled(controlnet_args):
209
+ processed = process_img2img_with_controlnet(p, args, anim_args, loop_args, controlnet_args, root, frame)
210
+ else:
211
+ processed = processing.process_images(p)
212
+
213
+ if root.initial_info == None:
214
+ root.initial_seed = processed.seed
215
+ root.initial_info = processed.info
216
+
217
+ if root.first_frame == None:
218
+ root.first_frame = processed.images[0]
219
+
220
+ results = processed.images[0]
221
+
222
+ return results
223
+
224
+ def print_generate_table(args, anim_args, p):
225
+ from rich.table import Table
226
+ from rich import box
227
+ table = Table(padding=0, box=box.ROUNDED)
228
+ field_names = ["Steps", "CFG"]
229
+ if anim_args.animation_mode != 'Interpolation':
230
+ field_names.append("Denoise")
231
+ field_names += ["Subseed", "Subs. str"] * (anim_args.enable_subseed_scheduling)
232
+ field_names += ["Sampler"] * anim_args.enable_sampler_scheduling
233
+ field_names += ["Checkpoint"] * anim_args.enable_checkpoint_scheduling
234
+ for field_name in field_names:
235
+ table.add_column(field_name, justify="center")
236
+ rows = [str(p.steps), str(p.cfg_scale)]
237
+ if anim_args.animation_mode != 'Interpolation':
238
+ rows.append(str(p.denoising_strength))
239
+ rows += [str(p.subseed), str(p.subseed_strength)] * (anim_args.enable_subseed_scheduling)
240
+ rows += [p.sampler_name] * anim_args.enable_sampler_scheduling
241
+ rows += [str(args.checkpoint)] * anim_args.enable_checkpoint_scheduling
242
+ table.add_row(*rows)
243
+
244
+ console.print(table)
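A minimal standalone sketch (not part of the diff) of how the pairwise_repl backport above is used to pick the keyframe interval, and hence the tweening span, that contains the current frame; the keyframe indices and frame number below are made up for illustration.

import itertools

def pairwise_repl(iterable):
    # same backport of itertools.pairwise (Python 3.10+) as in generate.py
    a, b = itertools.tee(iterable)
    next(b, None)
    return zip(a, b)

framesToImageSwapOn = [0, 50, 120]   # hypothetical keyframe indices
frame = 75
skipFrame = 25                       # default, as in generate.py
for fs, fe in pairwise_repl(framesToImageSwapOn):
    if fs <= frame <= fe:
        skipFrame = fe - fs          # span of the interval containing `frame`
print(skipFrame)                     # -> 70 (120 - 50)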
extensions/deforum/scripts/deforum_helpers/gradio_funcs.py ADDED
@@ -0,0 +1,83 @@
1
+ import gradio as gr
2
+ from .video_audio_utilities import extract_number, get_quick_vid_info
3
+
4
+ def change_visibility_from_skip_video(choice):
5
+ return gr.update(visible=False) if choice else gr.update(visible=True)
6
+
7
+ def update_r_upscale_factor(choice):
8
+ return gr.update(value='x4', choices = ['x4']) if choice != 'realesr-animevideov3' else gr.update(value='x2', choices = ['x2', 'x3', 'x4'])
9
+
10
+ def change_perlin_visibility(choice):
11
+ return gr.update(visible=choice=="perlin")
12
+
13
+ def change_color_coherence_video_every_N_frames_visibility(choice):
14
+ return gr.update(visible=choice=="Video Input")
15
+
16
+ def change_seed_iter_visibility(choice):
17
+ return gr.update(visible=choice=="iter")
18
+
19
+ def change_seed_schedule_visibility(choice):
20
+ return gr.update(visible=choice=="schedule")
21
+
22
+ def disable_pers_flip_accord(choice):
23
+ return gr.update(visible=True) if choice in ['2D','3D'] else gr.update(visible=False)
24
+
25
+ def change_max_frames_visibility(choice):
26
+ return gr.update(visible=choice != "Video Input")
27
+
28
+ def change_diffusion_cadence_visibility(choice):
29
+ return gr.update(visible=choice not in ['Video Input', 'Interpolation'])
30
+
31
+ def disble_3d_related_stuff(choice):
32
+ return gr.update(visible=False) if choice != '3D' else gr.update(visible=True)
33
+
34
+ def enable_2d_related_stuff(choice):
35
+ return gr.update(visible=True) if choice == '2D' else gr.update(visible=False)
36
+
37
+ def disable_by_interpolation(choice):
38
+ return gr.update(visible=False) if choice in ['Interpolation'] else gr.update(visible=True)
39
+
40
+ def disable_by_video_input(choice):
41
+ return gr.update(visible=False) if choice in ['Video Input'] else gr.update(visible=True)
42
+
43
+ def change_comp_mask_x_visibility(choice):
44
+ return gr.update(visible=choice != "None")
45
+
46
+ def change_gif_button_visibility(choice):
47
+ return gr.update(visible=False, value=False) if int(choice) > 30 else gr.update(visible=True)
48
+
49
+ def disable_by_hybrid_composite(choice):
50
+ return gr.update(visible=True) if choice else gr.update(visible=False)
51
+
52
+ def disable_by_hybrid_composite_dynamic(choice, comp_mask_type):
53
+ if choice == True:
54
+ if comp_mask_type != 'None':
55
+ return gr.update(visible=True)
56
+ return gr.update(visible=False)
57
+
58
+ def disable_by_comp_mask(choice):
59
+ return gr.update(visible=False) if choice == 'None' else gr.update(visible=True)
60
+
61
+ def disable_by_non_optical_flow(choice):
62
+ return gr.update(visible=False) if choice != 'Optical Flow' else gr.update(visible=True)
63
+
64
+ # Upscaling Gradio UI related funcs
65
+ def vid_upscale_gradio_update_stats(vid_path, upscale_factor):
66
+ if not vid_path:
67
+ return '---', '---', '---', '---'
68
+ factor = extract_number(upscale_factor)
69
+ fps, fcount, resolution = get_quick_vid_info(vid_path.name)
70
+ in_res_str = f"{resolution[0]}*{resolution[1]}"
71
+ out_res_str = f"{resolution[0] * factor}*{resolution[1] * factor}"
72
+ return fps, fcount, in_res_str, out_res_str
73
+ def update_upscale_out_res(in_res, upscale_factor):
74
+ if not in_res:
75
+ return '---'
76
+ factor = extract_number(upscale_factor)
77
+ w, h = [int(x) * factor for x in in_res.split('*')]
78
+ return f"{w}*{h}"
79
+ def update_upscale_out_res_by_model_name(in_res, upscale_model_name):
80
+ if not upscale_model_name or in_res == '---':
81
+ return '---'
82
+ factor = 2 if upscale_model_name == 'realesr-animevideov3' else 4
83
+ return f"{int(in_res.split('*')[0]) * factor}*{int(in_res.split('*')[1]) * factor}"
extensions/deforum/scripts/deforum_helpers/human_masking.py ADDED
@@ -0,0 +1,72 @@
1
+ import os, cv2
2
+ import torch
3
+ from pathlib import Path
4
+ from multiprocessing import freeze_support
5
+
6
+ def extract_frames(input_video_path, output_imgs_path):
7
+ # Open the video file
8
+ vidcap = cv2.VideoCapture(input_video_path)
9
+
10
+ # Get the total number of frames in the video
11
+ frame_count = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
12
+
13
+ # Create the output directory if it does not exist
14
+ if not os.path.exists(output_imgs_path):
15
+ os.makedirs(output_imgs_path)
16
+
17
+ # Extract the frames
18
+ for i in range(frame_count):
19
+ success, image = vidcap.read()
20
+ if success:
21
+ cv2.imwrite(os.path.join(output_imgs_path, f"frame{i}.png"), image)
22
+ print(f"{frame_count} frames extracted and saved to {output_imgs_path}")
23
+
24
+
25
+ def video2humanmasks(input_frames_path, output_folder_path, output_type, fps):
26
+ # freeze support is needed for video outputting
27
+ freeze_support()
28
+
29
+ # check if input path exists and is a directory
30
+ if not os.path.exists(input_frames_path) or not os.path.isdir(input_frames_path):
31
+ raise ValueError("Invalid input path: {}".format(input_frames_path))
32
+
33
+ # check if output path exists and is a directory
34
+ if not os.path.exists(output_folder_path) or not os.path.isdir(output_folder_path):
35
+ raise ValueError("Invalid output path: {}".format(output_folder_path))
36
+
37
+ # check if output_type is valid
38
+ valid_output_types = ["video", "pngs", "both"]
39
+ if output_type.lower() not in valid_output_types:
40
+ raise ValueError("Invalid output type: {}. Must be one of {}".format(output_type, valid_output_types))
41
+
42
+ # try to predict where torch cache lives, so we can try and fetch models from cache in the next step
43
+ predicted_torch_model_cache_path = os.path.join(Path.home(), ".cache", "torch", "hub", "hithereai_RobustVideoMatting_master")
44
+ predicted_rvm_cache_testilfe = os.path.join(predicted_torch_model_cache_path, "hubconf.py")
45
+
46
+ # try to fetch the models from cache, and only if they can't be found, download from the internet (to enable offline usage)
47
+ try:
48
+ # Try to fetch the models from cache
49
+ convert_video = torch.hub.load(predicted_torch_model_cache_path, "converter", source='local')
50
+ model = torch.hub.load(predicted_torch_model_cache_path, "mobilenetv3", source='local').cuda()
51
+ except:
52
+ # Download from the internet if not found in cache
53
+ convert_video = torch.hub.load("hithereai/RobustVideoMatting", "converter")
54
+ model = torch.hub.load("hithereai/RobustVideoMatting", "mobilenetv3").cuda()
55
+
56
+ output_alpha_vid_path = os.path.join(output_folder_path, "human_masked_video.mp4")
57
+ # extract human masks from the input folder's imgs.
58
+ # in this step PNGs will be extracted only if output_type is set to PNGs. Otherwise a video will be made, and in the case of Both, the video will be extracted in the next step to PNGs
59
+ convert_video(
60
+ model,
61
+ input_source=input_frames_path, # full path of the folder that contains all of the extracted input imgs
62
+ output_type='video' if output_type.upper() in ("VIDEO", "BOTH") else 'png_sequence',
63
+ output_alpha=output_alpha_vid_path if output_type.upper() in ("VIDEO", "BOTH") else output_folder_path,
64
+ output_video_mbps=4,
65
+ output_video_fps=fps,
66
+ downsample_ratio=None, # None for auto
67
+ seq_chunk=12, # Process n frames at once for better parallelism
68
+ progress=True # show extraction progress
69
+ )
70
+
71
+ if output_type.lower() == "both":
72
+ extract_frames(output_alpha_vid_path, output_folder_path)
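A hedged usage sketch of video2humanmasks as defined above. The paths and fps are placeholders, the import path is an assumption that depends on how the extension is packaged, and a CUDA-capable GPU plus the hithereai/RobustVideoMatting hub model are required.

from human_masking import video2humanmasks  # assumed import path

frames_dir = "/tmp/deforum_run/inputframes"   # pre-extracted input frames (placeholder)
masks_dir = "/tmp/deforum_run/human_masks"    # must already exist (placeholder)
video2humanmasks(frames_dir, masks_dir, output_type="both", fps=15)
# "both" writes human_masked_video.mp4 into masks_dir, then re-extracts it to PNGs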
extensions/deforum/scripts/deforum_helpers/hybrid_video.py ADDED
@@ -0,0 +1,436 @@
1
+ import cv2
2
+ import os
3
+ import pathlib
4
+ import numpy as np
5
+ import random
6
+ from PIL import Image, ImageChops, ImageOps, ImageEnhance
7
+ from .video_audio_utilities import vid2frames, get_quick_vid_info, get_frame_name, get_next_frame
8
+ from .human_masking import video2humanmasks
9
+
10
+ def delete_all_imgs_in_folder(folder_path):
11
+ files = list(pathlib.Path(folder_path).glob('*.jpg'))
12
+ files.extend(list(pathlib.Path(folder_path).glob('*.png')))
13
+ for f in files: os.remove(f)
14
+
15
+ def hybrid_generation(args, anim_args, root):
16
+ video_in_frame_path = os.path.join(args.outdir, 'inputframes')
17
+ hybrid_frame_path = os.path.join(args.outdir, 'hybridframes')
18
+ human_masks_path = os.path.join(args.outdir, 'human_masks')
19
+
20
+ if anim_args.hybrid_generate_inputframes:
21
+ # create folders for the video input frames and optional hybrid frames to live in
22
+ os.makedirs(video_in_frame_path, exist_ok=True)
23
+ os.makedirs(hybrid_frame_path, exist_ok=True)
24
+
25
+ # delete frames if overwrite = true
26
+ if anim_args.overwrite_extracted_frames:
27
+ delete_all_imgs_in_folder(hybrid_frame_path)
28
+
29
+ # save the video frames from input video
30
+ print(f"Video to extract: {anim_args.video_init_path}")
31
+ print(f"Extracting video (1 every {anim_args.extract_nth_frame}) frames to {video_in_frame_path}...")
32
+ video_fps = vid2frames(video_path=anim_args.video_init_path, video_in_frame_path=video_in_frame_path, n=anim_args.extract_nth_frame, overwrite=anim_args.overwrite_extracted_frames, extract_from_frame=anim_args.extract_from_frame, extract_to_frame=anim_args.extract_to_frame)
33
+
34
+ # extract alpha masks of humans from the extracted input video imgs
35
+ if anim_args.hybrid_generate_human_masks != "None":
36
+ # create a folder for the human masks imgs to live in
37
+ print(f"Checking /creating a folder for the human masks")
38
+ os.makedirs(human_masks_path, exist_ok=True)
39
+
40
+ # delete frames if overwrite = true
41
+ if anim_args.overwrite_extracted_frames:
42
+ delete_all_imgs_in_folder(human_masks_path)
43
+
44
+ # if generate_input_frames isn't selected, vid2frames isn't called and we don't get the video fps, so we check the video fps here instead
45
+ if not anim_args.hybrid_generate_inputframes:
46
+ _, video_fps, _ = get_quick_vid_info(anim_args.video_init_path)
47
+
48
+ # calculate the correct fps of the masked video according to the original video fps and 'extract_nth_frame'
49
+ output_fps = video_fps/anim_args.extract_nth_frame
50
+
51
+ # generate the actual alpha masks from the input imgs
52
+ print(f"Extracting alpha humans masks from the input frames")
53
+ video2humanmasks(video_in_frame_path, human_masks_path, anim_args.hybrid_generate_human_masks, output_fps)
54
+
55
+ # determine max frames from length of input frames
56
+ anim_args.max_frames = len([f for f in pathlib.Path(video_in_frame_path).glob('*.jpg')])
57
+ print(f"Using {anim_args.max_frames} input frames from {video_in_frame_path}...")
58
+
59
+ # get sorted list of inputfiles
60
+ inputfiles = sorted(pathlib.Path(video_in_frame_path).glob('*.jpg'))
61
+
62
+ # use first frame as init
63
+ if anim_args.hybrid_use_first_frame_as_init_image:
64
+ for f in inputfiles:
65
+ args.init_image = str(f)
66
+ args.use_init = True
67
+ print(f"Using init_image from video: {args.init_image}")
68
+ break
69
+
70
+ return args, anim_args, inputfiles
71
+
72
+ def hybrid_composite(args, anim_args, frame_idx, prev_img, depth_model, hybrid_comp_schedules, root):
73
+ video_frame = os.path.join(args.outdir, 'inputframes', get_frame_name(anim_args.video_init_path) + f"{frame_idx:05}.jpg")
74
+ video_depth_frame = os.path.join(args.outdir, 'hybridframes', get_frame_name(anim_args.video_init_path) + f"_vid_depth{frame_idx:05}.jpg")
75
+ depth_frame = os.path.join(args.outdir, f"{args.timestring}_depth_{frame_idx-1:05}.png")
76
+ mask_frame = os.path.join(args.outdir, 'hybridframes', get_frame_name(anim_args.video_init_path) + f"_mask{frame_idx:05}.jpg")
77
+ comp_frame = os.path.join(args.outdir, 'hybridframes', get_frame_name(anim_args.video_init_path) + f"_comp{frame_idx:05}.jpg")
78
+ prev_frame = os.path.join(args.outdir, 'hybridframes', get_frame_name(anim_args.video_init_path) + f"_prev{frame_idx:05}.jpg")
79
+ prev_img = cv2.cvtColor(prev_img, cv2.COLOR_BGR2RGB)
80
+ prev_img_hybrid = Image.fromarray(prev_img)
81
+ video_image = Image.open(video_frame)
82
+ video_image = video_image.resize((args.W, args.H), Image.Resampling.LANCZOS)
83
+ hybrid_mask = None
84
+
85
+ # composite mask types
86
+ if anim_args.hybrid_comp_mask_type == 'Depth': # get depth from last generation
87
+ hybrid_mask = Image.open(depth_frame)
88
+ elif anim_args.hybrid_comp_mask_type == 'Video Depth': # get video depth
89
+ video_depth = depth_model.predict(np.array(video_image), anim_args, root.half_precision)
90
+ depth_model.save(video_depth_frame, video_depth)
91
+ hybrid_mask = Image.open(video_depth_frame)
92
+ elif anim_args.hybrid_comp_mask_type == 'Blend': # create blend mask image
93
+ hybrid_mask = Image.blend(ImageOps.grayscale(prev_img_hybrid), ImageOps.grayscale(video_image), hybrid_comp_schedules['mask_blend_alpha'])
94
+ elif anim_args.hybrid_comp_mask_type == 'Difference': # create difference mask image
95
+ hybrid_mask = ImageChops.difference(ImageOps.grayscale(prev_img_hybrid), ImageOps.grayscale(video_image))
96
+
97
+ # optionally invert mask, if mask type is defined
98
+ if anim_args.hybrid_comp_mask_inverse and anim_args.hybrid_comp_mask_type != "None":
99
+ hybrid_mask = ImageOps.invert(hybrid_mask)
100
+
101
+ # if a mask type is selected, make composition
102
+ if hybrid_mask == None:
103
+ hybrid_comp = video_image
104
+ else:
105
+ # ensure grayscale
106
+ hybrid_mask = ImageOps.grayscale(hybrid_mask)
107
+ # equalization before
108
+ if anim_args.hybrid_comp_mask_equalize in ['Before', 'Both']:
109
+ hybrid_mask = ImageOps.equalize(hybrid_mask)
110
+ # contrast
111
+ hybrid_mask = ImageEnhance.Contrast(hybrid_mask).enhance(hybrid_comp_schedules['mask_contrast'])
112
+ # auto contrast with cutoffs lo/hi
113
+ if anim_args.hybrid_comp_mask_auto_contrast:
114
+ hybrid_mask = autocontrast_grayscale(np.array(hybrid_mask), hybrid_comp_schedules['mask_auto_contrast_cutoff_low'], hybrid_comp_schedules['mask_auto_contrast_cutoff_high'])
115
+ hybrid_mask = Image.fromarray(hybrid_mask)
116
+ hybrid_mask = ImageOps.grayscale(hybrid_mask)
117
+ if anim_args.hybrid_comp_save_extra_frames:
118
+ hybrid_mask.save(mask_frame)
119
+ # equalization after
120
+ if anim_args.hybrid_comp_mask_equalize in ['After', 'Both']:
121
+ hybrid_mask = ImageOps.equalize(hybrid_mask)
122
+ # do compositing and save
123
+ hybrid_comp = Image.composite(prev_img_hybrid, video_image, hybrid_mask)
124
+ if anim_args.hybrid_comp_save_extra_frames:
125
+ hybrid_comp.save(comp_frame)
126
+
127
+ # final blend of composite with prev_img, or just a blend if no composite is selected
128
+ hybrid_blend = Image.blend(prev_img_hybrid, hybrid_comp, hybrid_comp_schedules['alpha'])
129
+ if anim_args.hybrid_comp_save_extra_frames:
130
+ hybrid_blend.save(prev_frame)
131
+
132
+ prev_img = cv2.cvtColor(np.array(hybrid_blend), cv2.COLOR_RGB2BGR)
133
+
134
+ # restore to np array and return
135
+ return args, prev_img
136
+
137
+ def get_matrix_for_hybrid_motion(frame_idx, dimensions, inputfiles, hybrid_motion):
138
+ img1 = cv2.cvtColor(get_resized_image_from_filename(str(inputfiles[frame_idx-1]), dimensions), cv2.COLOR_BGR2GRAY)
139
+ img2 = cv2.cvtColor(get_resized_image_from_filename(str(inputfiles[frame_idx]), dimensions), cv2.COLOR_BGR2GRAY)
140
+ matrix = get_transformation_matrix_from_images(img1, img2, hybrid_motion)
141
+ print(f"Calculating {hybrid_motion} RANSAC matrix for frames {frame_idx} to {frame_idx+1}")
142
+ return matrix
143
+
144
+ def get_matrix_for_hybrid_motion_prev(frame_idx, dimensions, inputfiles, prev_img, hybrid_motion):
145
+ # first handle invalid images from cadence by returning default matrix
146
+ height, width = prev_img.shape[:2]
147
+ if height == 0 or width == 0 or prev_img.dtype != np.uint8:
148
+ return get_hybrid_motion_default_matrix(hybrid_motion)
149
+ else:
150
+ prev_img_gray = cv2.cvtColor(prev_img, cv2.COLOR_BGR2GRAY)
151
+ img = cv2.cvtColor(get_resized_image_from_filename(str(inputfiles[frame_idx]), dimensions), cv2.COLOR_BGR2GRAY)
152
+ matrix = get_transformation_matrix_from_images(prev_img_gray, img, hybrid_motion)
153
+ print(f"Calculating {hybrid_motion} RANSAC matrix for frames {frame_idx} to {frame_idx+1}")
154
+ return matrix
155
+
156
+ def get_flow_for_hybrid_motion(frame_idx, dimensions, inputfiles, hybrid_frame_path, method, do_flow_visualization=False):
157
+ print(f"Calculating {method} optical flow for frames {frame_idx} to {frame_idx+1}")
158
+ i1 = get_resized_image_from_filename(str(inputfiles[frame_idx]), dimensions)
159
+ i2 = get_resized_image_from_filename(str(inputfiles[frame_idx+1]), dimensions)
160
+ flow = get_flow_from_images(i1, i2, method)
161
+ if do_flow_visualization:
162
+ save_flow_visualization(frame_idx, dimensions, flow, inputfiles, hybrid_frame_path)
163
+ return flow
164
+
165
+ def get_flow_for_hybrid_motion_prev(frame_idx, dimensions, inputfiles, hybrid_frame_path, prev_img, method, do_flow_visualization=False):
166
+ print(f"Calculating {method} optical flow for frames {frame_idx} to {frame_idx+1}")
167
+ # first handle invalid images from cadence by returning default matrix
168
+ height, width = prev_img.shape[:2]
169
+ if height == 0 or width == 0:
170
+ flow = get_hybrid_motion_default_flow(dimensions)
171
+ else:
172
+ i1 = prev_img.astype(np.uint8)
173
+ i2 = get_resized_image_from_filename(str(inputfiles[frame_idx]), dimensions)
174
+ flow = get_flow_from_images(i1, i2, method)
175
+ if do_flow_visualization:
176
+ save_flow_visualization(frame_idx, dimensions, flow, inputfiles, hybrid_frame_path)
177
+ return flow
178
+
179
+ def image_transform_ransac(image_cv2, xform, hybrid_motion, border_mode=cv2.BORDER_REPLICATE):
180
+ if hybrid_motion == "Perspective":
181
+ return image_transform_perspective(image_cv2, xform, border_mode=border_mode)
182
+ else: # Affine
183
+ return image_transform_affine(image_cv2, xform, border_mode=border_mode)
184
+
185
+ def image_transform_optical_flow(img, flow, border_mode=cv2.BORDER_REPLICATE, flow_reverse=False):
186
+ if not flow_reverse:
187
+ flow = -flow
188
+ h, w = img.shape[:2]
189
+ flow[:, :, 0] += np.arange(w)
190
+ flow[:, :, 1] += np.arange(h)[:,np.newaxis]
191
+ return remap(img, flow, border_mode)
192
+
193
+ def image_transform_affine(image_cv2, xform, border_mode=cv2.BORDER_REPLICATE):
194
+ return cv2.warpAffine(
195
+ image_cv2,
196
+ xform,
197
+ (image_cv2.shape[1],image_cv2.shape[0]),
198
+ borderMode=border_mode
199
+ )
200
+
201
+ def image_transform_perspective(image_cv2, xform, border_mode=cv2.BORDER_REPLICATE):
202
+ return cv2.warpPerspective(
203
+ image_cv2,
204
+ xform,
205
+ (image_cv2.shape[1], image_cv2.shape[0]),
206
+ borderMode=border_mode
207
+ )
208
+
209
+ def get_hybrid_motion_default_matrix(hybrid_motion):
210
+ if hybrid_motion == "Perspective":
211
+ arr = np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
212
+ else:
213
+ arr = np.array([[1., 0., 0.], [0., 1., 0.]])
214
+ return arr
215
+
216
+ def get_hybrid_motion_default_flow(dimensions):
217
+ cols, rows = dimensions
218
+ flow = np.zeros((rows, cols, 2), np.float32)
219
+ return flow
220
+
221
+ def get_transformation_matrix_from_images(img1, img2, hybrid_motion, max_corners=200, quality_level=0.01, min_distance=30, block_size=3):
222
+ # Detect feature points in previous frame
223
+ prev_pts = cv2.goodFeaturesToTrack(img1,
224
+ maxCorners=max_corners,
225
+ qualityLevel=quality_level,
226
+ minDistance=min_distance,
227
+ blockSize=block_size)
228
+
229
+ if prev_pts is None or len(prev_pts) < 8 or img1 is None or img2 is None:
230
+ return get_hybrid_motion_default_matrix(hybrid_motion)
231
+
232
+ # Get optical flow
233
+ curr_pts, status, err = cv2.calcOpticalFlowPyrLK(img1, img2, prev_pts, None)
234
+
235
+ # Filter only valid points
236
+ idx = np.where(status==1)[0]
237
+ prev_pts = prev_pts[idx]
238
+ curr_pts = curr_pts[idx]
239
+
240
+ if len(prev_pts) < 8 or len(curr_pts) < 8:
241
+ return get_hybrid_motion_default_matrix(hybrid_motion)
242
+
243
+ if hybrid_motion == "Perspective": # Perspective - Find the transformation between points
244
+ transformation_matrix, mask = cv2.findHomography(prev_pts, curr_pts, cv2.RANSAC, 5.0)
245
+ return transformation_matrix
246
+ else: # Affine - Compute a rigid transformation (without depth, only scale + rotation + translation)
247
+ transformation_rigid_matrix, rigid_mask = cv2.estimateAffinePartial2D(prev_pts, curr_pts)
248
+ return transformation_rigid_matrix
249
+
250
+ def get_flow_from_images(i1, i2, method):
251
+ if method =="DIS Medium":
252
+ r = get_flow_from_images_DIS(i1, i2, cv2.DISOPTICAL_FLOW_PRESET_MEDIUM)
253
+ elif method =="DIS Fast":
254
+ r = get_flow_from_images_DIS(i1, i2, cv2.DISOPTICAL_FLOW_PRESET_FAST)
255
+ elif method =="DIS UltraFast":
256
+ r = get_flow_from_images_DIS(i1, i2, cv2.DISOPTICAL_FLOW_PRESET_ULTRAFAST)
257
+ elif method == "DenseRLOF": # requires running opencv-contrib-python (full opencv) INSTEAD of opencv-python
258
+ r = get_flow_from_images_Dense_RLOF(i1, i2)
259
+ elif method == "SF": # requires running opencv-contrib-python (full opencv) INSTEAD of opencv-python
260
+ r = get_flow_from_images_SF(i1, i2)
261
+ elif method =="Farneback Fine":
262
+ r = get_flow_from_images_Farneback(i1, i2, 'fine')
263
+ else: # Farneback Normal:
264
+ r = get_flow_from_images_Farneback(i1, i2)
265
+ return r
266
+
267
+ def get_flow_from_images_DIS(i1, i2, preset):
268
+ i1 = cv2.cvtColor(i1, cv2.COLOR_BGR2GRAY)
269
+ i2 = cv2.cvtColor(i2, cv2.COLOR_BGR2GRAY)
270
+ dis=cv2.DISOpticalFlow_create(preset)
271
+ return dis.calc(i1, i2, None)
272
+
273
+ def get_flow_from_images_Dense_RLOF(i1, i2, last_flow=None):
274
+ return cv2.optflow.calcOpticalFlowDenseRLOF(i1, i2, flow = last_flow)
275
+
276
+ def get_flow_from_images_SF(i1, i2, last_flow=None, layers = 3, averaging_block_size = 2, max_flow = 4):
277
+ return cv2.optflow.calcOpticalFlowSF(i1, i2, layers, averaging_block_size, max_flow)
278
+
279
+ def get_flow_from_images_Farneback(i1, i2, preset="normal", last_flow=None, pyr_scale = 0.5, levels = 3, winsize = 15, iterations = 3, poly_n = 5, poly_sigma = 1.2, flags = 0):
280
+ flags = cv2.OPTFLOW_FARNEBACK_GAUSSIAN # Specify the operation flags
281
+ pyr_scale = 0.5 # The image scale (<1) to build pyramids for each image
282
+ if preset == "fine":
283
+ levels = 13 # The number of pyramid layers, including the initial image
284
+ winsize = 77 # The averaging window size
285
+ iterations = 13 # The number of iterations at each pyramid level
286
+ poly_n = 15 # The size of the pixel neighborhood used to find polynomial expansion in each pixel
287
+ poly_sigma = 0.8 # The standard deviation of the Gaussian used to smooth derivatives used as a basis for the polynomial expansion
288
+ else: # "normal"
289
+ levels = 5 # The number of pyramid layers, including the initial image
290
+ winsize = 21 # The averaging window size
291
+ iterations = 5 # The number of iterations at each pyramid level
292
+ poly_n = 7 # The size of the pixel neighborhood used to find polynomial expansion in each pixel
293
+ poly_sigma = 1.2 # The standard deviation of the Gaussian used to smooth derivatives used as a basis for the polynomial expansion
294
+ i1 = cv2.cvtColor(i1, cv2.COLOR_BGR2GRAY)
295
+ i2 = cv2.cvtColor(i2, cv2.COLOR_BGR2GRAY)
296
+ flags = 0 # flags = cv2.OPTFLOW_USE_INITIAL_FLOW
297
+ flow = cv2.calcOpticalFlowFarneback(i1, i2, last_flow, pyr_scale, levels, winsize, iterations, poly_n, poly_sigma, flags)
298
+ return flow
299
+
300
+ def save_flow_visualization(frame_idx, dimensions, flow, inputfiles, hybrid_frame_path):
301
+ flow_img_file = os.path.join(hybrid_frame_path, f"flow{frame_idx:05}.jpg")
302
+ flow_img = cv2.imread(str(inputfiles[frame_idx]))
303
+ flow_img = cv2.resize(flow_img, (dimensions[0], dimensions[1]), cv2.INTER_AREA)
304
+ flow_img = cv2.cvtColor(flow_img, cv2.COLOR_RGB2GRAY)
305
+ flow_img = cv2.cvtColor(flow_img, cv2.COLOR_GRAY2BGR)
306
+ flow_img = draw_flow_lines_in_grid_in_color(flow_img, flow)
307
+ flow_img = cv2.cvtColor(flow_img, cv2.COLOR_BGR2RGB)
308
+ cv2.imwrite(flow_img_file, flow_img)
309
+ print(f"Saved optical flow visualization: {flow_img_file}")
310
+
311
+ def draw_flow_lines_in_grid_in_color(img, flow, step=8, magnitude_multiplier=1, min_magnitude = 1, max_magnitude = 10000):
312
+ flow = flow * magnitude_multiplier
313
+ h, w = img.shape[:2]
314
+ y, x = np.mgrid[step/2:h:step, step/2:w:step].reshape(2,-1).astype(int)
315
+ fx, fy = flow[y,x].T
316
+ lines = np.vstack([x, y, x+fx, y+fy]).T.reshape(-1, 2, 2)
317
+ lines = np.int32(lines + 0.5)
318
+ vis = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
319
+ vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)
320
+
321
+ mag, ang = cv2.cartToPolar(flow[...,0], flow[...,1])
322
+ hsv = np.zeros((flow.shape[0], flow.shape[1], 3), dtype=np.uint8)
323
+ hsv[...,0] = ang*180/np.pi/2
324
+ hsv[...,1] = 255
325
+ hsv[...,2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
326
+ bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
327
+ vis = cv2.add(vis, bgr)
328
+
329
+ # Iterate through the lines
330
+ for (x1, y1), (x2, y2) in lines:
331
+ # Calculate the magnitude of the line
332
+ magnitude = np.sqrt((x2 - x1)**2 + (y2 - y1)**2)
333
+
334
+ # Only draw the line if it falls within the magnitude range
335
+ if min_magnitude <= magnitude <= max_magnitude:
336
+ b = int(bgr[y1, x1, 0])
337
+ g = int(bgr[y1, x1, 1])
338
+ r = int(bgr[y1, x1, 2])
339
+ color = (b, g, r)
340
+ cv2.arrowedLine(vis, (x1, y1), (x2, y2), color, thickness=1, tipLength=0.1)
341
+ return vis
342
+
343
+ def draw_flow_lines_in_color(img, flow, threshold=3, magnitude_multiplier=1, min_magnitude = 0, max_magnitude = 10000):
344
+ # h, w = img.shape[:2]
345
+ vis = img.copy() # Create a copy of the input image
346
+
347
+ # Find the locations in the flow field where the magnitude of the flow is greater than the threshold
348
+ mag, ang = cv2.cartToPolar(flow[...,0], flow[...,1])
349
+ idx = np.where(mag > threshold)
350
+
351
+ # Create HSV image
352
+ hsv = np.zeros((flow.shape[0], flow.shape[1], 3), dtype=np.uint8)
353
+ hsv[...,0] = ang*180/np.pi/2
354
+ hsv[...,1] = 255
355
+ hsv[...,2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
356
+
357
+ # Convert HSV image to BGR
358
+ bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
359
+
360
+ # Add color from bgr
361
+ vis = cv2.add(vis, bgr)
362
+
363
+ # Draw an arrow at each of these locations to indicate the direction of the flow
364
+ for i, (y, x) in enumerate(zip(idx[0], idx[1])):
365
+ # Calculate the magnitude of the line
366
+ x2 = x + magnitude_multiplier * int(flow[y, x, 0])
367
+ y2 = y + magnitude_multiplier * int(flow[y, x, 1])
368
+ magnitude = np.sqrt((x2 - x)**2 + (y2 - y)**2)
369
+
370
+ # Only draw the line if it falls within the magnitude range
371
+ if min_magnitude <= magnitude <= max_magnitude:
372
+ if i % random.randint(100, 200) == 0:
373
+ b = int(bgr[y, x, 0])
374
+ g = int(bgr[y, x, 1])
375
+ r = int(bgr[y, x, 2])
376
+ color = (b, g, r)
377
+ cv2.arrowedLine(vis, (x, y), (x2, y2), color, thickness=1, tipLength=0.25)
378
+
379
+ return vis
380
+
381
+ def autocontrast_grayscale(image, low_cutoff=0, high_cutoff=100):
382
+ # Perform autocontrast on a grayscale np array image.
383
+ # Find the minimum and maximum values in the image
384
+ min_val = np.percentile(image, low_cutoff)
385
+ max_val = np.percentile(image, high_cutoff)
386
+
387
+ # Scale the image so that the minimum value is 0 and the maximum value is 255
388
+ image = 255 * (image - min_val) / (max_val - min_val)
389
+
390
+ # Clip values that fall outside the range [0, 255]
391
+ image = np.clip(image, 0, 255)
392
+
393
+ return image
394
+
395
+ def get_resized_image_from_filename(im, dimensions):
396
+ img = cv2.imread(im)
397
+ return cv2.resize(img, (dimensions[0], dimensions[1]), cv2.INTER_AREA)
398
+
399
+ def remap(img, flow, border_mode = cv2.BORDER_REFLECT_101):
400
+ # copyMakeBorder doesn't support wrap, but supports replicate. Replaces wrap with reflect101.
401
+ if border_mode == cv2.BORDER_WRAP:
402
+ border_mode = cv2.BORDER_REFLECT_101
403
+ h, w = img.shape[:2]
404
+ displacement = int(h * 0.25), int(w * 0.25)
405
+ larger_img = cv2.copyMakeBorder(img, displacement[0], displacement[0], displacement[1], displacement[1], border_mode)
406
+ lh, lw = larger_img.shape[:2]
407
+ larger_flow = extend_flow(flow, lw, lh)
408
+ remapped_img = cv2.remap(larger_img, larger_flow, None, cv2.INTER_LINEAR, border_mode)
409
+ output_img = center_crop_image(remapped_img, w, h)
410
+ return output_img
411
+
412
+ def center_crop_image(img, w, h):
413
+ y, x, _ = img.shape
414
+ width_indent = int((x - w) / 2)
415
+ height_indent = int((y - h) / 2)
416
+ cropped_img = img[height_indent:y-height_indent, width_indent:x-width_indent]
417
+ return cropped_img
418
+
419
+ def extend_flow(flow, w, h):
420
+ # Get the shape of the original flow image
421
+ flow_h, flow_w = flow.shape[:2]
422
+ # Calculate the position of the image in the new image
423
+ x_offset = int((w - flow_w) / 2)
424
+ y_offset = int((h - flow_h) / 2)
425
+ # Generate the X and Y grids
426
+ x_grid, y_grid = np.meshgrid(np.arange(w), np.arange(h))
427
+ # Create the new flow image and set it to the X and Y grids
428
+ new_flow = np.dstack((x_grid, y_grid)).astype(np.float32)
429
+ # Shift the values of the original flow by the size of the border
430
+ flow[:,:,0] += x_offset
431
+ flow[:,:,1] += y_offset
432
+ # Overwrite the middle of the grid with the original flow
433
+ new_flow[y_offset:y_offset+flow_h, x_offset:x_offset+flow_w, :] = flow
434
+ # Return the extended image
435
+ return new_flow
436
+
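A self-contained sketch of the flow-warp convention used by image_transform_optical_flow and remap above: the dense flow is negated, converted into absolute sampling coordinates, and applied with cv2.remap. The 64x64 test image and the uniform 5px flow are made up for illustration; the real helper additionally pads with copyMakeBorder and center-crops to reduce border artifacts.

import cv2
import numpy as np

img = np.zeros((64, 64, 3), np.uint8)
cv2.circle(img, (32, 32), 10, (255, 255, 255), -1)   # simple test pattern

flow = np.zeros((64, 64, 2), np.float32)
flow[..., 0] = 5.0                                    # uniform 5px flow along +x

flow = -flow                                          # same sign convention as the helper
flow[:, :, 0] += np.arange(64)                        # absolute x sampling coordinates
flow[:, :, 1] += np.arange(64)[:, np.newaxis]         # absolute y sampling coordinates
warped = cv2.remap(img, flow, None, cv2.INTER_LINEAR, borderMode=cv2.BORDER_REPLICATE)
# the circle's centre moves from x=32 to roughly x=37, i.e. along the flow direction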
extensions/deforum/scripts/deforum_helpers/image_sharpening.py ADDED
@@ -0,0 +1,22 @@
1
+ import cv2
2
+ import numpy as np
3
+
4
+ def unsharp_mask(img, kernel_size=(5, 5), sigma=1.0, amount=1.0, threshold=0, mask=None):
5
+ if amount == 0:
6
+ return img
7
+ # Return a sharpened version of the image, using an unsharp mask.
8
+ # If mask is not None, only areas under mask are handled
9
+ blurred = cv2.GaussianBlur(img, kernel_size, sigma)
10
+ sharpened = float(amount + 1) * img - float(amount) * blurred
11
+ sharpened = np.maximum(sharpened, np.zeros(sharpened.shape))
12
+ sharpened = np.minimum(sharpened, 255 * np.ones(sharpened.shape))
13
+ sharpened = sharpened.round().astype(np.uint8)
14
+ if threshold > 0:
15
+ low_contrast_mask = np.absolute(img - blurred) < threshold
16
+ np.copyto(sharpened, img, where=low_contrast_mask)
17
+ if mask is not None:
18
+ mask = np.array(mask)
19
+ masked_sharpened = cv2.bitwise_and(sharpened, sharpened, mask=mask)
20
+ masked_img = cv2.bitwise_and(img, img, mask=255-mask)
21
+ sharpened = cv2.add(masked_img, masked_sharpened)
22
+ return sharpened
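A tiny numeric illustration of the unsharp-mask formula implemented above, sharpened = (amount + 1) * img - amount * GaussianBlur(img), here with amount = 1 on a synthetic 8x8 ramp:

import cv2
import numpy as np

img = np.tile(np.arange(0, 256, 32, dtype=np.uint8), (8, 1))   # 8x8 horizontal ramp
blurred = cv2.GaussianBlur(img, (5, 5), 1.0)
sharpened = np.clip(2.0 * img.astype(np.float32) - blurred, 0, 255).astype(np.uint8)
# transitions in the ramp are exaggerated; perfectly flat regions stay unchanged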
extensions/deforum/scripts/deforum_helpers/load_images.py ADDED
@@ -0,0 +1,102 @@
1
+ import requests
2
+ import os
3
+ from PIL import Image, ImageOps
4
+ import cv2
5
+ import numpy as np
6
+ import socket
7
+ import torchvision.transforms.functional as TF
8
+
9
+ def load_img(path : str, shape=None, use_alpha_as_mask=False):
10
+ # use_alpha_as_mask: Read the alpha channel of the image as the mask image
11
+ image = load_image(path)
12
+ if use_alpha_as_mask:
13
+ image = image.convert('RGBA')
14
+ else:
15
+ image = image.convert('RGB')
16
+
17
+ if shape is not None:
18
+ image = image.resize(shape, resample=Image.LANCZOS)
19
+
20
+ mask_image = None
21
+ if use_alpha_as_mask:
22
+ # Split alpha channel into a mask_image
23
+ red, green, blue, alpha = Image.Image.split(image)
24
+ mask_image = alpha.convert('L')
25
+ image = image.convert('RGB')
26
+
27
+ # check using init image alpha as mask if mask is not blank
28
+ extrema = mask_image.getextrema()
29
+ if (extrema == (0,0)) or extrema == (255,255):
30
+ print("use_alpha_as_mask==True: Using the alpha channel from the init image as a mask, but the alpha channel is blank.")
31
+ print("ignoring alpha as mask.")
32
+ mask_image = None
33
+
34
+ return image, mask_image
35
+
36
+ def load_image(image_path :str):
37
+ image = None
38
+ if image_path.startswith('http://') or image_path.startswith('https://'):
39
+ try:
40
+ host = socket.gethostbyname("www.google.com")
41
+ s = socket.create_connection((host, 80), 2)
42
+ s.close()
43
+ except:
44
+ raise ConnectionError("There is no active internet connection available - please use local masks and init files only.")
45
+
46
+ try:
47
+ response = requests.get(image_path, stream=True)
48
+ except requests.exceptions.RequestException as e:
49
+ raise ConnectionError("Failed to download image due to no internet connection. Error: {}".format(e))
50
+ if response.status_code == 404 or response.status_code != 200:
51
+ raise ConnectionError("Init image url or mask image url is not valid")
52
+ image = Image.open(response.raw).convert('RGB')
53
+ else:
54
+ if not os.path.exists(image_path):
55
+ raise RuntimeError("Init image path or mask image path is not valid")
56
+ image = Image.open(image_path).convert('RGB')
57
+
58
+ return image
59
+
60
+ def prepare_mask(mask_input, mask_shape, mask_brightness_adjust=1.0, mask_contrast_adjust=1.0):
61
+ """
62
+ prepares mask for use in webui
63
+ """
64
+ if isinstance(mask_input, Image.Image):
65
+ mask = mask_input
66
+ else :
67
+ mask = load_image(mask_input)
68
+ mask = mask.resize(mask_shape, resample=Image.LANCZOS)
69
+ if mask_brightness_adjust != 1:
70
+ mask = TF.adjust_brightness(mask, mask_brightness_adjust)
71
+ if mask_contrast_adjust != 1:
72
+ mask = TF.adjust_contrast(mask, mask_contrast_adjust)
73
+ mask = mask.convert('L')
74
+ return mask
75
+
76
+ def check_mask_for_errors(mask_input, invert_mask=False):
77
+ extrema = mask_input.getextrema()
78
+ if (invert_mask):
79
+ if extrema == (255,255):
80
+ print("after inverting mask will be blank. ignoring mask")
81
+ return None
82
+ elif extrema == (0,0):
83
+ print("mask is blank. ignoring mask")
84
+ return None
85
+ else:
86
+ return mask_input
87
+
88
+ def get_mask(args):
89
+ return check_mask_for_errors(
90
+ prepare_mask(args.mask_file, (args.W, args.H), args.mask_brightness_adjust, args.mask_contrast_adjust)
91
+ )
92
+
93
+ def get_mask_from_file(mask_file, args):
94
+ return check_mask_for_errors(
95
+ prepare_mask(mask_file, (args.W, args.H), args.mask_brightness_adjust, args.mask_contrast_adjust)
96
+ )
97
+
98
+ def blank_if_none(mask, w, h, mode):
99
+ return Image.new(mode, (w, h), (0)) if mask is None else mask
100
+
101
+ def none_if_blank(mask):
102
+ return None if mask.getextrema() == (0,0) else mask
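A hedged usage sketch of the mask pipeline above; the mask path, target size and adjustment values are placeholders, and the functions are assumed to be importable from this module.

mask = prepare_mask("masks/subject.png", (512, 512),
                    mask_brightness_adjust=1.0, mask_contrast_adjust=1.2)
mask = check_mask_for_errors(mask, invert_mask=False)   # returns None if the mask is blank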
extensions/deforum/scripts/deforum_helpers/noise.py ADDED
@@ -0,0 +1,64 @@
1
+ import torch
2
+ import numpy as np
3
+ from PIL import ImageOps
4
+ import math
5
+ from .animation import sample_to_cv2
6
+ import cv2
7
+
8
+ deforum_noise_gen = torch.Generator(device='cpu')
9
+
10
+ # 2D Perlin noise in PyTorch https://gist.github.com/vadimkantorov/ac1b097753f217c5c11bc2ff396e0a57
11
+ def rand_perlin_2d(shape, res, fade = lambda t: 6*t**5 - 15*t**4 + 10*t**3):
12
+ delta = (res[0] / shape[0], res[1] / shape[1])
13
+ d = (shape[0] // res[0], shape[1] // res[1])
14
+
15
+ grid = torch.stack(torch.meshgrid(torch.arange(0, res[0], delta[0]), torch.arange(0, res[1], delta[1]), indexing='ij'), dim = -1) % 1
16
+ angles = 2*math.pi*torch.rand(res[0]+1, res[1]+1, generator=deforum_noise_gen)
17
+ gradients = torch.stack((torch.cos(angles), torch.sin(angles)), dim = -1)
18
+
19
+ tile_grads = lambda slice1, slice2: gradients[slice1[0]:slice1[1], slice2[0]:slice2[1]].repeat_interleave(d[0], 0).repeat_interleave(d[1], 1)
20
+ dot = lambda grad, shift: (torch.stack((grid[:shape[0],:shape[1],0] + shift[0], grid[:shape[0],:shape[1], 1] + shift[1] ), dim = -1) * grad[:shape[0], :shape[1]]).sum(dim = -1)
21
+
22
+ n00 = dot(tile_grads([0, -1], [0, -1]), [0, 0])
23
+ n10 = dot(tile_grads([1, None], [0, -1]), [-1, 0])
24
+ n01 = dot(tile_grads([0, -1],[1, None]), [0, -1])
25
+ n11 = dot(tile_grads([1, None], [1, None]), [-1,-1])
26
+ t = fade(grid[:shape[0], :shape[1]])
27
+ return math.sqrt(2) * torch.lerp(torch.lerp(n00, n10, t[..., 0]), torch.lerp(n01, n11, t[..., 0]), t[..., 1])
28
+
29
+ def rand_perlin_2d_octaves(shape, res, octaves=1, persistence=0.5):
30
+ noise = torch.zeros(shape)
31
+ frequency = 1
32
+ amplitude = 1
33
+ for _ in range(int(octaves)):
34
+ noise += amplitude * rand_perlin_2d(shape, (frequency*res[0], frequency*res[1]))
35
+ frequency *= 2
36
+ amplitude *= persistence
37
+ return noise
38
+
39
+ def condition_noise_mask(noise_mask, invert_mask = False):
40
+ if invert_mask:
41
+ noise_mask = ImageOps.invert(noise_mask)
42
+ noise_mask = np.array(noise_mask.convert("L"))
43
+ noise_mask = noise_mask.astype(np.float32) / 255.0
44
+ noise_mask = np.around(noise_mask, decimals=0)
45
+ noise_mask = torch.from_numpy(noise_mask)
46
+ #noise_mask = torch.round(noise_mask)
47
+ return noise_mask
48
+
49
+ def add_noise(sample, noise_amt: float, seed: int, noise_type: str, noise_args, noise_mask = None, invert_mask = False):
50
+ deforum_noise_gen.manual_seed(seed) # Reproducibility
51
+ sample2dshape = (sample.shape[0], sample.shape[1]) #sample is cv2, so height - width
52
+ noise = torch.randn((sample.shape[2], sample.shape[0], sample.shape[1]), generator=deforum_noise_gen) # White noise
53
+ if noise_type == 'perlin':
54
+ # rand_perlin_2d_octaves is between -1 and 1, so we need to shift it to be between 0 and 1
55
+ # print(sample.shape)
56
+ noise = noise * ((rand_perlin_2d_octaves(sample2dshape, (int(noise_args[0]), int(noise_args[1])), octaves=noise_args[2], persistence=noise_args[3]) + torch.ones(sample2dshape)) / 2)
57
+ if noise_mask is not None:
58
+ noise_mask = condition_noise_mask(noise_mask, invert_mask)
59
+ noise_to_add = sample_to_cv2(noise * noise_mask)
60
+ else:
61
+ noise_to_add = sample_to_cv2(noise)
62
+ sample = cv2.addWeighted(sample, 1-noise_amt, noise_to_add, noise_amt, 0)
63
+
64
+ return sample
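A hedged usage sketch of add_noise above: 10% perlin-modulated noise on a synthetic BGR frame. noise_args follows the (res_x, res_y, octaves, persistence) order consumed above, the frame is random data, and the call relies on sample_to_cv2 from animation.py behaving as it does in this extension.

import numpy as np

frame = np.random.randint(0, 255, (256, 256, 3), dtype=np.uint8)   # synthetic BGR frame
noised = add_noise(frame, noise_amt=0.1, seed=42, noise_type='perlin',
                   noise_args=(4, 4, 4, 0.5), noise_mask=None, invert_mask=False)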
extensions/deforum/scripts/deforum_helpers/parseq_adapter.py ADDED
@@ -0,0 +1,164 @@
1
+ import copy
2
+ import json
3
+ import logging
4
+ import operator
5
+ from operator import itemgetter
6
+
7
+ import numpy as np
8
+ import pandas as pd
9
+ import requests
10
+
11
+ from .animation_key_frames import DeformAnimKeys
12
+
13
+ logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
14
+
15
+
16
+ class ParseqAnimKeys():
17
+ def __init__(self, parseq_args, anim_args):
18
+
19
+ # Resolve manifest either directly from supplied value
20
+ # or via supplied URL
21
+ manifestOrUrl = parseq_args.parseq_manifest.strip()
22
+ if (manifestOrUrl.startswith('http')):
23
+ logging.info(f"Loading Parseq manifest from URL: {manifestOrUrl}")
24
+ try:
25
+ body = requests.get(manifestOrUrl).text
26
+ logging.debug(f"Loaded remote manifest: {body}")
27
+ self.parseq_json = json.loads(body)
28
+
29
+ # Add the parseq manifest without the detailed frame data to parseq_args.
30
+ # This ensures it will be saved in the settings file, so that you can always
31
+ # see exactly what parseq prompts and keyframes were used, even if what the URL
32
+ # points to changes.
33
+ parseq_args.fetched_parseq_manifest_summary = copy.deepcopy(self.parseq_json)
34
+ if parseq_args.fetched_parseq_manifest_summary['rendered_frames']:
35
+ del parseq_args.fetched_parseq_manifest_summary['rendered_frames']
36
+ if parseq_args.fetched_parseq_manifest_summary['rendered_frames_meta']:
37
+ del parseq_args.fetched_parseq_manifest_summary['rendered_frames_meta']
38
+
39
+ except Exception as e:
40
+ logging.error(f"Unable to load Parseq manifest from URL: {manifestOrUrl}")
41
+ raise e
42
+ else:
43
+ self.parseq_json = json.loads(manifestOrUrl)
44
+
45
+ self.default_anim_keys = DeformAnimKeys(anim_args)
46
+ self.rendered_frames = self.parseq_json['rendered_frames']
47
+ self.max_frame = self.get_max('frame')
48
+ count_defined_frames = len(self.rendered_frames)
49
+ expected_defined_frames = self.max_frame+1 # frames are 0-indexed
50
+
51
+ self.required_frames = anim_args.max_frames
52
+
53
+ if (expected_defined_frames != count_defined_frames):
54
+ logging.warning(f"There may be duplicated or missing frame data in the Parseq input: expected {expected_defined_frames} frames including frame 0 because the highest frame number is {self.max_frame}, but there are {count_defined_frames} frames defined.")
55
+
56
+ if (anim_args.max_frames > count_defined_frames):
57
+ logging.info(f"Parseq data defines {count_defined_frames} frames, but the requested animation is {anim_args.max_frames} frames. The last Parseq frame definition will be duplicated to match the expected frame count.")
58
+ if (anim_args.max_frames < count_defined_frames):
59
+ logging.info(f"Parseq data defines {count_defined_frames} frames, but the requested animation is {anim_args.max_frames} frames. The last Parseq frame definitions will be ignored.")
60
+ else:
61
+ logging.info(f"Parseq data defines {count_defined_frames} frames.")
62
+
63
+ # Parseq treats input values as absolute values. So if you want to
64
+ # progressively rotate 180 degrees over 4 frames, you specify: 45, 90, 135, 180.
65
+ # However, many animation parameters are relative to the previous frame if there is enough
66
+ # loopback strength. So if you want to rotate 180 degrees over 5 frames, the animation engine expects:
67
+ # 45, 45, 45, 45. Therefore, for such parameter, we use the fact that Parseq supplies delta values.
68
+ optional_delta = '_delta' if parseq_args.parseq_use_deltas else ''
69
+ self.angle_series = self.parseq_to_anim_series('angle' + optional_delta)
70
+ self.zoom_series = self.parseq_to_anim_series('zoom' + optional_delta)
71
+ self.translation_x_series = self.parseq_to_anim_series('translation_x' + optional_delta)
72
+ self.translation_y_series = self.parseq_to_anim_series('translation_y' + optional_delta)
73
+ self.translation_z_series = self.parseq_to_anim_series('translation_z' + optional_delta)
74
+ self.rotation_3d_x_series = self.parseq_to_anim_series('rotation_3d_x' + optional_delta)
75
+ self.rotation_3d_y_series = self.parseq_to_anim_series('rotation_3d_y' + optional_delta)
76
+ self.rotation_3d_z_series = self.parseq_to_anim_series('rotation_3d_z' + optional_delta)
77
+ self.perspective_flip_theta_series = self.parseq_to_anim_series('perspective_flip_theta' + optional_delta)
78
+ self.perspective_flip_phi_series = self.parseq_to_anim_series('perspective_flip_phi' + optional_delta)
79
+ self.perspective_flip_gamma_series = self.parseq_to_anim_series('perspective_flip_gamma' + optional_delta)
80
+
81
+ # Non-motion animation args
82
+ self.perspective_flip_fv_series = self.parseq_to_anim_series('perspective_flip_fv')
83
+ self.noise_schedule_series = self.parseq_to_anim_series('noise')
84
+ self.strength_schedule_series = self.parseq_to_anim_series('strength')
85
+ self.sampler_schedule_series = self.parseq_to_anim_series('sampler_schedule')
86
+ self.contrast_schedule_series = self.parseq_to_anim_series('contrast')
87
+ self.cfg_scale_schedule_series = self.parseq_to_anim_series('scale')
88
+ self.steps_schedule_series = self.parseq_to_anim_series("steps_schedule")
89
+ self.seed_schedule_series = self.parseq_to_anim_series('seed')
90
+ self.fov_series = self.parseq_to_anim_series('fov')
91
+ self.near_series = self.parseq_to_anim_series('near')
92
+ self.far_series = self.parseq_to_anim_series('far')
93
+ self.prompts = self.parseq_to_anim_series('deforum_prompt') # formatted as "{positive} --neg {negative}"
94
+ self.subseed_series = self.parseq_to_anim_series('subseed')
95
+ self.subseed_strength_series = self.parseq_to_anim_series('subseed_strength')
96
+ self.kernel_schedule_series = self.parseq_to_anim_series('antiblur_kernel')
97
+ self.sigma_schedule_series = self.parseq_to_anim_series('antiblur_sigma')
98
+ self.amount_schedule_series = self.parseq_to_anim_series('antiblur_amount')
99
+ self.threshold_schedule_series = self.parseq_to_anim_series('antiblur_threshold')
100
+
101
+ # Config:
102
+ # TODO this is currently ignored. User must ensure the output FPS set in parseq
103
+ # matches the one set in Deforum to avoid unexpected results.
104
+ self.config_output_fps = self.parseq_json['options']['output_fps']
105
+
106
+ def get_max(self, seriesName):
107
+ return max(self.rendered_frames, key=itemgetter(seriesName))[seriesName]
108
+
109
+ def parseq_to_anim_series(self, seriesName):
110
+
111
+ # Check if the value is present in the first frame of the JSON data. If not, assume it's undefined.
112
+ # The Parseq contract is that the first frame (at least) must define values for all fields.
113
+ try:
114
+ if self.rendered_frames[0][seriesName] is not None:
115
+ logging.info(f"Found {seriesName} in first frame of Parseq data. Assuming it's defined.")
116
+ except KeyError:
117
+ return None
118
+
119
+ key_frame_series = pd.Series([np.nan for a in range(self.required_frames)])
120
+
121
+ for frame in self.rendered_frames:
122
+ frame_idx = frame['frame']
123
+ if frame_idx < self.required_frames:
124
+ if not np.isnan(key_frame_series[frame_idx]):
125
+ logging.warning(f"Duplicate frame definition {frame_idx} detected for data {seriesName}. Latest wins.")
126
+ key_frame_series[frame_idx] = frame[seriesName]
127
+
128
+ # If the animation will have more frames than Parseq defines,
129
+ # duplicate final value to match the required frame count.
130
+ while (frame_idx < self.required_frames):
131
+ key_frame_series[frame_idx] = operator.itemgetter(-1)(self.rendered_frames)[seriesName]
132
+ frame_idx += 1
133
+
134
+ return key_frame_series
135
+
136
+ # fallback to anim_args if the series is not defined in the Parseq data
137
+ def __getattribute__(inst, name):
138
+ try:
139
+ definedField = super(ParseqAnimKeys, inst).__getattribute__(name)
140
+ except AttributeError:
141
+ # No field with this name has been explicitly extracted from the JSON data.
142
+ # It must be a new parameter. Let's see if it's in the raw JSON.
143
+
144
+ # parseq doesn't use _series, _schedule or _schedule_series suffixes in the
145
+ # JSON data - remove them.
146
+ strippableSuffixes = ['_series', '_schedule']
147
+ parseqName = name
148
+ while any(parseqName.endswith(suffix) for suffix in strippableSuffixes):
149
+ for suffix in strippableSuffixes:
150
+ if parseqName.endswith(suffix):
151
+ parseqName = parseqName[:-len(suffix)]
152
+
153
+ # returns None if not defined in Parseq JSON data
154
+ definedField = inst.parseq_to_anim_series(parseqName)
155
+ if (definedField is not None):
156
+ # add the field to the instance so we don't compute it again.
157
+ setattr(inst, name, definedField)
158
+
159
+ if (definedField is not None):
160
+ return definedField
161
+ else:
162
+ logging.info(f"Data for {name} not defined in Parseq data (looked for: '{parseqName}'). Falling back to standard Deforum values.")
163
+ return getattr(inst.default_anim_keys, name)
164
+
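A worked illustration of the absolute-versus-delta distinction described in the comments above, using a hypothetical 180-degree rotation spread over 4 frames:

absolute = [45, 90, 135, 180]                                  # what Parseq stores per frame
deltas = [absolute[0]] + [b - a for a, b in zip(absolute, absolute[1:])]
print(deltas)                                                  # -> [45, 45, 45, 45]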
extensions/deforum/scripts/deforum_helpers/prompt.py ADDED
@@ -0,0 +1,113 @@
1
+ import re
2
+
3
+ def check_is_number(value):
4
+ float_pattern = r'^(?=.)([+-]?([0-9]*)(\.([0-9]+))?)$'
5
+ return re.match(float_pattern, value)
6
+
7
+ def parse_weight(match, frame = 0)->float:
8
+ import numexpr
9
+ w_raw = match.group("weight")
10
+ if w_raw == None:
11
+ return 1
12
+ if check_is_number(w_raw):
13
+ return float(w_raw)
14
+ else:
15
+ t = frame
16
+ if len(w_raw) < 3:
17
+ print('the value inside `-characters cannot represent a math function')
18
+ return 1
19
+ return float(numexpr.evaluate(w_raw[1:-1]))
20
+
21
+ def split_weighted_subprompts(text, frame = 0):
22
+ """
23
+ splits the prompt based on deforum webui implementation, moved from generate.py
24
+ """
25
+ math_parser = re.compile("""
26
+ (?P<weight>(
27
+ `[\S\s]*?`# a math function wrapped in `-characters
28
+ ))
29
+ """, re.VERBOSE)
30
+
31
+ parsed_prompt = re.sub(math_parser, lambda m: str(parse_weight(m, frame)), text)
32
+
33
+ negative_prompts = []
34
+ positive_prompts = []
35
+
36
+ prompt_split = parsed_prompt.split("--neg")
37
+ if len(prompt_split) > 1:
38
+ positive_prompts, negative_prompts = parsed_prompt.split("--neg", 1) #TODO: add --neg to vanilla Deforum for compat
39
+ else:
40
+ positive_prompts = prompt_split[0]
41
+ negative_prompts = ""
42
+
43
+ return positive_prompts, negative_prompts
44
+
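A hedged usage sketch of the two helpers above (the prompt text and frame number are invented; `numexpr` must be installed, as the import inside parse_weight implies, and the import path assumes the webui scripts folder is on sys.path):

    from deforum_helpers.prompt import split_weighted_subprompts  # assumed import path

    # Backtick-wrapped expressions are evaluated per frame via numexpr (with t = frame),
    # then "--neg" splits the result into positive and negative parts.
    text = "a castle :`cos(t/10)` --neg blurry :1"
    pos, neg = split_weighted_subprompts(text, frame=5)
    # pos -> "a castle :0.877... ", neg -> " blurry :1"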
45
+ def interpolate_prompts(animation_prompts, max_frames):
46
+ import numpy as np
47
+ import pandas as pd
48
+ # Get prompts sorted by keyframe
49
+ sorted_prompts = sorted(animation_prompts.items(), key=lambda item: int(item[0]))
50
+
51
+ # Setup container for interpolated prompts
52
+ prompt_series = pd.Series([np.nan for a in range(max_frames)])
53
+
54
+ # For every keyframe prompt except the last
55
+ for i in range(0,len(sorted_prompts)-1):
56
+
57
+ # Get current and next keyframe
58
+ current_frame = int(sorted_prompts[i][0])
59
+ next_frame = int(sorted_prompts[i+1][0])
60
+
61
+ # Ensure there's no weird ordering issues or duplication in the animation prompts
62
+ # (unlikely because we sort above, and the json parser will strip dupes)
63
+ if current_frame>=next_frame:
64
+ print(f"WARNING: Sequential prompt keyframes {i}:{current_frame} and {i+1}:{next_frame} are not monotonically increasing; skipping interpolation.")
65
+ continue
66
+
67
+ # Get current and next keyframes' positive and negative prompts (if any)
68
+ current_prompt = sorted_prompts[i][1]
69
+ next_prompt = sorted_prompts[i+1][1]
70
+ current_positive, current_negative, *_ = current_prompt.split("--neg") + [None]
71
+ next_positive, next_negative, *_ = next_prompt.split("--neg") + [None]
72
+
73
+ # Calculate how much to shift the weight from current to next prompt at each frame
74
+ weight_step = 1/(next_frame-current_frame)
75
+
76
+ # Apply weighted prompt interpolation for each frame between current and next keyframe
77
+ # using the syntax: prompt1 :weight1 AND prompt2 :weight2 --neg nprompt1 :weight1 AND nprompt2 :weight2
78
+ # (See: https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#composable-diffusion )
79
+ for f in range(current_frame,next_frame):
80
+ next_weight = weight_step * (f-current_frame)
81
+ current_weight = 1 - next_weight
82
+
83
+ # We will build the prompt incrementally depending on which prompts are present
84
+ prompt_series[f] = ''
85
+
86
+ # Cater for the case where neither, either or both current & next have positive prompts:
87
+ if current_positive:
88
+ prompt_series[f] += f"{current_positive} :{current_weight}"
89
+ if current_positive and next_positive:
90
+ prompt_series[f] += f" AND "
91
+ if next_positive:
92
+ prompt_series[f] += f"{next_positive} :{next_weight}"
93
+
94
+ # Cater for the case where neither, either or both current & next have negative prompts:
95
+ if current_negative or next_negative:
96
+ prompt_series[f] += " --neg "
97
+ if current_negative:
98
+ prompt_series[f] += f" {current_negative} :{current_weight}"
99
+ if current_negative and next_negative:
100
+ prompt_series[f] += f" AND "
101
+ if next_negative:
102
+ prompt_series[f] += f" {next_negative} :{next_weight}"
103
+
104
+ # Set explicitly declared keyframe prompts (overwriting interpolated values at the keyframe idx). This ensures:
105
+ # - That final prompt is set, and
106
+ # - Gives us a chance to emit warnings if any keyframe prompts are already using composable diffusion
107
+ for i, prompt in animation_prompts.items():
108
+ prompt_series[int(i)] = prompt
109
+ if ' AND ' in prompt:
110
+ print(f"WARNING: keyframe {i}'s prompt is using composable diffusion (aka the 'AND' keyword). This will cause unexpected behaviour with interpolation.")
111
+
112
+ # Return the filled series, in case max_frames is greater than the last keyframe or any ranges were skipped.
113
+ return prompt_series.ffill().bfill()
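For reference, a hedged example of what interpolate_prompts returns (keyframes and frame count are invented; the import path assumes the webui scripts folder is on sys.path):

    from deforum_helpers.prompt import interpolate_prompts  # assumed import path

    prompts = {"0": "a forest", "10": "a city --neg people"}
    series = interpolate_prompts(prompts, max_frames=20)
    print(series[0])   # "a forest"                      (explicit keyframe wins)
    print(series[5])   # roughly "a forest :0.5 AND a city :0.5 --neg people :0.5"
    print(series[15])  # "a city --neg people"           (last keyframe forward-filled)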
extensions/deforum/scripts/deforum_helpers/render.py ADDED
@@ -0,0 +1,507 @@
1
+ import os
2
+ import json
3
+ import pandas as pd
4
+ import cv2
5
+ import numpy as np
6
+ from PIL import Image, ImageOps
7
+ from .rich import console
8
+
9
+ from .generate import generate
10
+ from .noise import add_noise
11
+ from .animation import sample_from_cv2, sample_to_cv2, anim_frame_warp
12
+ from .animation_key_frames import DeformAnimKeys, LooperAnimKeys
13
+ from .video_audio_utilities import get_frame_name, get_next_frame
14
+ from .depth import DepthModel
15
+ from .colors import maintain_colors
16
+ from .parseq_adapter import ParseqAnimKeys
17
+ from .seed import next_seed
18
+ from .blank_frame_reroll import blank_frame_reroll
19
+ from .image_sharpening import unsharp_mask
20
+ from .load_images import get_mask, load_img, get_mask_from_file
21
+ from .hybrid_video import hybrid_generation, hybrid_composite
22
+ from .hybrid_video import get_matrix_for_hybrid_motion, get_matrix_for_hybrid_motion_prev, get_flow_for_hybrid_motion, get_flow_for_hybrid_motion_prev, image_transform_ransac, image_transform_optical_flow
23
+ from .save_images import save_image
24
+ from .composable_masks import compose_mask_with_check
25
+ from .settings import get_keys_to_exclude
26
+ from .deforum_controlnet import unpack_controlnet_vids, is_controlnet_enabled
27
+ # Webui
28
+ from modules.shared import opts, cmd_opts, state, sd_model
29
+ from modules import lowvram, devices, sd_hijack
30
+
31
+ def render_animation(args, anim_args, video_args, parseq_args, loop_args, controlnet_args, animation_prompts, root):
32
+ # handle hybrid video generation
33
+ if anim_args.animation_mode in ['2D','3D']:
34
+ if anim_args.hybrid_composite or anim_args.hybrid_motion in ['Affine', 'Perspective', 'Optical Flow']:
35
+ args, anim_args, inputfiles = hybrid_generation(args, anim_args, root)
36
+ # path required by hybrid functions, even if hybrid_comp_save_extra_frames is False
37
+ hybrid_frame_path = os.path.join(args.outdir, 'hybridframes')
38
+
39
+ # handle controlnet video input frames generation
40
+ if is_controlnet_enabled(controlnet_args):
41
+ unpack_controlnet_vids(args, anim_args, video_args, parseq_args, loop_args, controlnet_args, animation_prompts, root)
42
+
43
+ # use parseq if manifest is provided
44
+ use_parseq = parseq_args.parseq_manifest is not None and parseq_args.parseq_manifest.strip()
45
+ # expand key frame strings to values
46
+ keys = DeformAnimKeys(anim_args) if not use_parseq else ParseqAnimKeys(parseq_args, anim_args)
47
+ loopSchedulesAndData = LooperAnimKeys(loop_args, anim_args)
48
+ # resume animation
49
+ start_frame = 0
50
+ if anim_args.resume_from_timestring:
51
+ for tmp in os.listdir(args.outdir):
52
+ if ".txt" in tmp :
53
+ pass
54
+ else:
55
+ filename = tmp.split("_")
56
+ # don't use saved depth maps to count number of frames
57
+ if anim_args.resume_timestring in filename and "depth" not in filename:
58
+ start_frame += 1
59
+ #start_frame = start_frame - 1
60
+
61
+ # create output folder for the batch
62
+ os.makedirs(args.outdir, exist_ok=True)
63
+ print(f"Saving animation frames to:\n{args.outdir}")
64
+
65
+ # save settings for the batch
66
+ exclude_keys = get_keys_to_exclude('general')
67
+ settings_filename = os.path.join(args.outdir, f"{args.timestring}_settings.txt")
68
+ with open(settings_filename, "w+", encoding="utf-8") as f:
69
+ args.__dict__["prompts"] = animation_prompts
70
+ s = {}
71
+ for d in [dict(args.__dict__), dict(anim_args.__dict__), dict(parseq_args.__dict__), dict(loop_args.__dict__)]:
72
+ for key, value in d.items():
73
+ if key not in exclude_keys:
74
+ s[key] = value
75
+ json.dump(s, f, ensure_ascii=False, indent=4)
76
+
77
+ # resume from timestring
78
+ if anim_args.resume_from_timestring:
79
+ args.timestring = anim_args.resume_timestring
80
+
81
+ # Always enable pseudo-3d with parseq. No need for an extra toggle:
82
+ # Whether it's used or not in practice is defined by the schedules
83
+ if use_parseq:
84
+ anim_args.flip_2d_perspective = True
85
+
86
+ # expand prompts out to per-frame
87
+ if use_parseq:
88
+ prompt_series = keys.prompts
89
+ else:
90
+ prompt_series = pd.Series([np.nan for a in range(anim_args.max_frames)])
91
+ for i, prompt in animation_prompts.items():
92
+ prompt_series[int(i)] = prompt
93
+ prompt_series = prompt_series.ffill().bfill()
94
+
95
+ # check for video inits
96
+ using_vid_init = anim_args.animation_mode == 'Video Input'
97
+
98
+ # load depth model for 3D
99
+ predict_depths = (anim_args.animation_mode == '3D' and anim_args.use_depth_warping) or anim_args.save_depth_maps
100
+ predict_depths = predict_depths or (anim_args.hybrid_composite and anim_args.hybrid_comp_mask_type in ['Depth','Video Depth'])
101
+ if predict_depths:
102
+ depth_model = DepthModel('cpu' if cmd_opts.lowvram or cmd_opts.medvram else root.device)
103
+ depth_model.load_midas(root.models_path, root.half_precision)
104
+ if anim_args.midas_weight < 1.0:
105
+ depth_model.load_adabins(root.models_path)
106
+ # depth-based hybrid composite mask requires saved depth maps
107
+ if anim_args.hybrid_composite and anim_args.hybrid_comp_mask_type =='Depth':
108
+ anim_args.save_depth_maps = True
109
+ else:
110
+ depth_model = None
111
+ anim_args.save_depth_maps = False
112
+
113
+ # state for interpolating between diffusion steps
114
+ turbo_steps = 1 if using_vid_init else int(anim_args.diffusion_cadence)
115
+ turbo_prev_image, turbo_prev_frame_idx = None, 0
116
+ turbo_next_image, turbo_next_frame_idx = None, 0
117
+
118
+ # resume animation
119
+ prev_img = None
120
+ color_match_sample = None
121
+ if anim_args.resume_from_timestring:
122
+ last_frame = start_frame-1
123
+ if turbo_steps > 1:
124
+ last_frame -= last_frame%turbo_steps
125
+ path = os.path.join(args.outdir,f"{args.timestring}_{last_frame:05}.png")
126
+ img = cv2.imread(path)
127
+ #img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # Changed the colors on resume
128
+ prev_img = img
129
+ if anim_args.color_coherence != 'None':
130
+ color_match_sample = img
131
+ if turbo_steps > 1:
132
+ turbo_next_image, turbo_next_frame_idx = prev_img, last_frame
133
+ turbo_prev_image, turbo_prev_frame_idx = turbo_next_image, turbo_next_frame_idx
134
+ start_frame = last_frame+turbo_steps
135
+
136
+ args.n_samples = 1
137
+ frame_idx = start_frame
138
+
139
+ # reset the mask vals as they are overwritten in the compose_mask algorithm
140
+ mask_vals = {}
141
+ noise_mask_vals = {}
142
+
143
+ mask_vals['everywhere'] = Image.new('1', (args.W, args.H), 1)
144
+ noise_mask_vals['everywhere'] = Image.new('1', (args.W, args.H), 1)
145
+
146
+ mask_image = None
147
+
148
+ if args.use_init and args.init_image is not None and args.init_image != '':
149
+ _, mask_image = load_img(args.init_image,
150
+ shape=(args.W, args.H),
151
+ use_alpha_as_mask=args.use_alpha_as_mask)
152
+ mask_vals['init_mask'] = mask_image
153
+ noise_mask_vals['init_mask'] = mask_image
154
+
155
+ # Grab the first-frame masks since they won't be provided until the next frame
156
+ if mask_image is None and args.use_mask:
157
+ mask_vals['init_mask'] = get_mask(args)
158
+ noise_mask_vals['init_mask'] = get_mask(args) # TODO?: add a different default noise mask
159
+
160
+ if anim_args.use_mask_video:
161
+ mask_vals['video_mask'] = get_mask_from_file(get_next_frame(args.outdir, anim_args.video_mask_path, frame_idx, True), args)
162
+ noise_mask_vals['video_mask'] = get_mask_from_file(get_next_frame(args.outdir, anim_args.video_mask_path, frame_idx, True), args)
163
+ else:
164
+ mask_vals['video_mask'] = None
165
+ noise_mask_vals['video_mask'] = None
166
+
167
+ #Webui
168
+ state.job_count = anim_args.max_frames
169
+
170
+ while frame_idx < anim_args.max_frames:
171
+ #Webui
172
+ state.job = f"frame {frame_idx + 1}/{anim_args.max_frames}"
173
+ state.job_no = frame_idx + 1
174
+ if state.interrupted:
175
+ break
176
+
177
+ print(f"\033[36mAnimation frame: \033[0m{frame_idx}/{anim_args.max_frames} ")
178
+
179
+ noise = keys.noise_schedule_series[frame_idx]
180
+ strength = keys.strength_schedule_series[frame_idx]
181
+ scale = keys.cfg_scale_schedule_series[frame_idx]
182
+ contrast = keys.contrast_schedule_series[frame_idx]
183
+ kernel = int(keys.kernel_schedule_series[frame_idx])
184
+ sigma = keys.sigma_schedule_series[frame_idx]
185
+ amount = keys.amount_schedule_series[frame_idx]
186
+ threshold = keys.threshold_schedule_series[frame_idx]
187
+ hybrid_comp_schedules = {
188
+ "alpha": keys.hybrid_comp_alpha_schedule_series[frame_idx],
189
+ "mask_blend_alpha": keys.hybrid_comp_mask_blend_alpha_schedule_series[frame_idx],
190
+ "mask_contrast": keys.hybrid_comp_mask_contrast_schedule_series[frame_idx],
191
+ "mask_auto_contrast_cutoff_low": int(keys.hybrid_comp_mask_auto_contrast_cutoff_low_schedule_series[frame_idx]),
192
+ "mask_auto_contrast_cutoff_high": int(keys.hybrid_comp_mask_auto_contrast_cutoff_high_schedule_series[frame_idx]),
193
+ }
194
+ scheduled_sampler_name = None
195
+ scheduled_clipskip = None
196
+ mask_seq = None
197
+ noise_mask_seq = None
198
+ if anim_args.enable_steps_scheduling and keys.steps_schedule_series[frame_idx] is not None:
199
+ args.steps = int(keys.steps_schedule_series[frame_idx])
200
+ if anim_args.enable_sampler_scheduling and keys.sampler_schedule_series[frame_idx] is not None:
201
+ scheduled_sampler_name = keys.sampler_schedule_series[frame_idx].casefold()
202
+ if anim_args.enable_clipskip_scheduling and keys.clipskip_schedule_series[frame_idx] is not None:
203
+ scheduled_clipskip = int(keys.clipskip_schedule_series[frame_idx])
204
+ if args.use_mask and keys.mask_schedule_series[frame_idx] is not None:
205
+ mask_seq = keys.mask_schedule_series[frame_idx]
206
+ if anim_args.use_noise_mask and keys.noise_mask_schedule_series[frame_idx] is not None:
207
+ noise_mask_seq = keys.noise_mask_schedule_series[frame_idx]
208
+
209
+ if args.use_mask and not anim_args.use_noise_mask:
210
+ noise_mask_seq = mask_seq
211
+
212
+ depth = None
213
+
214
+ if anim_args.animation_mode == '3D' and (cmd_opts.lowvram or cmd_opts.medvram):
215
+ # Unload the main checkpoint and load the depth model
216
+ lowvram.send_everything_to_cpu()
217
+ sd_hijack.model_hijack.undo_hijack(sd_model)
218
+ devices.torch_gc()
219
+ depth_model.to(root.device)
220
+
221
+ # emit in-between frames
222
+ if turbo_steps > 1:
223
+ tween_frame_start_idx = max(0, frame_idx-turbo_steps)
224
+ for tween_frame_idx in range(tween_frame_start_idx, frame_idx):
225
+ tween = float(tween_frame_idx - tween_frame_start_idx + 1) / float(frame_idx - tween_frame_start_idx)
226
+ print(f" Creating in-between frame: {tween_frame_idx}; tween:{tween:0.2f};")
227
+
228
+ advance_prev = turbo_prev_image is not None and tween_frame_idx > turbo_prev_frame_idx
229
+ advance_next = tween_frame_idx > turbo_next_frame_idx
230
+
231
+ if depth_model is not None:
232
+ assert(turbo_next_image is not None)
233
+ depth = depth_model.predict(turbo_next_image, anim_args, root.half_precision)
234
+
235
+ if advance_prev:
236
+ turbo_prev_image, _ = anim_frame_warp(turbo_prev_image, args, anim_args, keys, tween_frame_idx, depth_model, depth=depth, device=root.device, half_precision=root.half_precision)
237
+ if advance_next:
238
+ turbo_next_image, _ = anim_frame_warp(turbo_next_image, args, anim_args, keys, tween_frame_idx, depth_model, depth=depth, device=root.device, half_precision=root.half_precision)
239
+
240
+ # hybrid video motion - warps turbo_prev_image or turbo_next_image to match motion
241
+ if tween_frame_idx > 0:
242
+ if anim_args.hybrid_motion in ['Affine', 'Perspective']:
243
+ if anim_args.hybrid_motion_use_prev_img:
244
+ if advance_prev:
245
+ matrix = get_matrix_for_hybrid_motion_prev(tween_frame_idx, (args.W, args.H), inputfiles, turbo_prev_image, anim_args.hybrid_motion)
246
+ turbo_prev_image = image_transform_ransac(turbo_prev_image, matrix, anim_args.hybrid_motion, cv2.BORDER_WRAP if anim_args.border == 'wrap' else cv2.BORDER_REPLICATE)
247
+ if advance_next:
248
+ matrix = get_matrix_for_hybrid_motion_prev(tween_frame_idx, (args.W, args.H), inputfiles, turbo_next_image, anim_args.hybrid_motion)
249
+ turbo_next_image = image_transform_ransac(turbo_next_image, matrix, anim_args.hybrid_motion, cv2.BORDER_WRAP if anim_args.border == 'wrap' else cv2.BORDER_REPLICATE)
250
+ else:
251
+ matrix = get_matrix_for_hybrid_motion(tween_frame_idx-1, (args.W, args.H), inputfiles, anim_args.hybrid_motion)
252
+ if advance_prev:
253
+ turbo_prev_image = image_transform_ransac(turbo_prev_image, matrix, anim_args.hybrid_motion, cv2.BORDER_WRAP if anim_args.border == 'wrap' else cv2.BORDER_REPLICATE)
254
+ if advance_next:
255
+ turbo_next_image = image_transform_ransac(turbo_next_image, matrix, anim_args.hybrid_motion, cv2.BORDER_WRAP if anim_args.border == 'wrap' else cv2.BORDER_REPLICATE)
256
+ if anim_args.hybrid_motion in ['Optical Flow']:
257
+ if anim_args.hybrid_motion_use_prev_img:
258
+ if advance_prev:
259
+ flow = get_flow_for_hybrid_motion_prev(tween_frame_idx-1, (args.W, args.H), inputfiles, hybrid_frame_path, turbo_prev_image, anim_args.hybrid_flow_method, anim_args.hybrid_comp_save_extra_frames)
260
+ turbo_prev_image = image_transform_optical_flow(turbo_prev_image, flow, cv2.BORDER_WRAP if anim_args.border == 'wrap' else cv2.BORDER_REPLICATE)
261
+ if advance_next:
262
+ flow = get_flow_for_hybrid_motion_prev(tween_frame_idx-1, (args.W, args.H), inputfiles, hybrid_frame_path, turbo_next_image, anim_args.hybrid_flow_method, anim_args.hybrid_comp_save_extra_frames)
263
+ turbo_next_image = image_transform_optical_flow(turbo_next_image, flow, cv2.BORDER_WRAP if anim_args.border == 'wrap' else cv2.BORDER_REPLICATE)
264
+ else:
265
+ flow = get_flow_for_hybrid_motion(tween_frame_idx-1, (args.W, args.H), inputfiles, hybrid_frame_path, anim_args.hybrid_flow_method, anim_args.hybrid_comp_save_extra_frames)
266
+ if advance_prev:
267
+ turbo_prev_image = image_transform_optical_flow(turbo_prev_image, flow, cv2.BORDER_WRAP if anim_args.border == 'wrap' else cv2.BORDER_REPLICATE)
268
+ if advance_next:
269
+ turbo_next_image = image_transform_optical_flow(turbo_next_image, flow, cv2.BORDER_WRAP if anim_args.border == 'wrap' else cv2.BORDER_REPLICATE)
270
+
271
+ turbo_prev_frame_idx = turbo_next_frame_idx = tween_frame_idx
272
+
273
+ if turbo_prev_image is not None and tween < 1.0:
274
+ img = turbo_prev_image*(1.0-tween) + turbo_next_image*tween
275
+ else:
276
+ img = turbo_next_image
277
+
278
+ # intercept and override to grayscale
279
+ if anim_args.color_force_grayscale:
280
+ img = cv2.cvtColor(img.astype(np.uint8), cv2.COLOR_BGR2GRAY)
281
+ img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
282
+
283
+ filename = f"{args.timestring}_{tween_frame_idx:05}.png"
284
+ cv2.imwrite(os.path.join(args.outdir, filename), img)
285
+ if anim_args.save_depth_maps:
286
+ depth_model.save(os.path.join(args.outdir, f"{args.timestring}_depth_{tween_frame_idx:05}.png"), depth)
287
+ if turbo_next_image is not None:
288
+ prev_img = turbo_next_image
289
+
290
+ # apply transforms to previous frame
291
+ if prev_img is not None:
292
+ prev_img, depth = anim_frame_warp(prev_img, args, anim_args, keys, frame_idx, depth_model, depth=None, device=root.device, half_precision=root.half_precision)
293
+
294
+ # hybrid video motion - warps prev_img to match motion, usually to prepare for compositing
295
+ if frame_idx > 0:
296
+ if anim_args.hybrid_motion in ['Affine', 'Perspective']:
297
+ if anim_args.hybrid_motion_use_prev_img:
298
+ matrix = get_matrix_for_hybrid_motion_prev(frame_idx, (args.W, args.H), inputfiles, prev_img, anim_args.hybrid_motion)
299
+ else:
300
+ matrix = get_matrix_for_hybrid_motion(frame_idx-1, (args.W, args.H), inputfiles, anim_args.hybrid_motion)
301
+ prev_img = image_transform_ransac(prev_img, matrix, anim_args.hybrid_motion, cv2.BORDER_WRAP if anim_args.border == 'wrap' else cv2.BORDER_REPLICATE)
302
+ if anim_args.hybrid_motion in ['Optical Flow']:
303
+ if anim_args.hybrid_motion_use_prev_img:
304
+ flow = get_flow_for_hybrid_motion_prev(frame_idx-1, (args.W, args.H), inputfiles, hybrid_frame_path, prev_img, anim_args.hybrid_flow_method, anim_args.hybrid_comp_save_extra_frames)
305
+ else:
306
+ flow = get_flow_for_hybrid_motion(frame_idx-1, (args.W, args.H), inputfiles, hybrid_frame_path, anim_args.hybrid_flow_method, anim_args.hybrid_comp_save_extra_frames)
307
+ prev_img = image_transform_optical_flow(prev_img, flow, cv2.BORDER_WRAP if anim_args.border == 'wrap' else cv2.BORDER_REPLICATE)
308
+
309
+ # do hybrid video - composites video frame into prev_img (now warped if using motion)
310
+ if anim_args.hybrid_composite:
311
+ args, prev_img = hybrid_composite(args, anim_args, frame_idx, prev_img, depth_model, hybrid_comp_schedules, root)
312
+
313
+ # apply color matching
314
+ if anim_args.color_coherence != 'None':
315
+ # video color matching
316
+ hybrid_available = anim_args.hybrid_composite or anim_args.hybrid_motion in ['Optical Flow', 'Affine', 'Perspective']
317
+ if anim_args.color_coherence == 'Video Input' and hybrid_available:
318
+ video_color_coherence_frame = int(frame_idx) % int(anim_args.color_coherence_video_every_N_frames) == 0
319
+ if video_color_coherence_frame:
320
+ prev_vid_img = Image.open(os.path.join(args.outdir, 'inputframes', get_frame_name(anim_args.video_init_path) + f"{frame_idx:05}.jpg"))
321
+ prev_vid_img = prev_vid_img.resize((args.W, args.H), Image.Resampling.LANCZOS)
322
+ color_match_sample = np.asarray(prev_vid_img)
323
+ color_match_sample = cv2.cvtColor(color_match_sample, cv2.COLOR_RGB2BGR)
324
+ if color_match_sample is None:
325
+ color_match_sample = prev_img.copy()
326
+ else:
327
+ prev_img = maintain_colors(prev_img, color_match_sample, anim_args.color_coherence)
328
+
329
+ # intercept and override to grayscale
330
+ if anim_args.color_force_grayscale:
331
+ prev_img = cv2.cvtColor(prev_img, cv2.COLOR_BGR2GRAY)
332
+ prev_img = cv2.cvtColor(prev_img, cv2.COLOR_GRAY2BGR)
333
+
334
+ # apply scaling
335
+ contrast_image = (prev_img * contrast).round().astype(np.uint8)
336
+ # anti-blur
337
+ if amount > 0:
338
+ contrast_image = unsharp_mask(contrast_image, (kernel, kernel), sigma, amount, threshold, mask_image if args.use_mask else None)
339
+ # apply frame noising
340
+ if args.use_mask or anim_args.use_noise_mask:
341
+ args.noise_mask = compose_mask_with_check(root, args, noise_mask_seq, noise_mask_vals, Image.fromarray(cv2.cvtColor(contrast_image, cv2.COLOR_BGR2RGB)))
342
+ noised_image = add_noise(contrast_image, noise, args.seed, anim_args.noise_type,
343
+ (anim_args.perlin_w, anim_args.perlin_h, anim_args.perlin_octaves, anim_args.perlin_persistence),
344
+ args.noise_mask, args.invert_mask)
345
+
346
+ # use transformed previous frame as init for current
347
+ args.use_init = True
348
+ args.init_sample = Image.fromarray(cv2.cvtColor(noised_image, cv2.COLOR_BGR2RGB))
349
+ args.strength = max(0.0, min(1.0, strength))
350
+
351
+ args.scale = scale
352
+
353
+ # Pix2Pix Image CFG Scale - does *nothing* with non pix2pix checkpoints
354
+ args.pix2pix_img_cfg_scale = float(keys.pix2pix_img_cfg_scale_series[frame_idx])
355
+
356
+ # grab prompt for current frame
357
+ args.prompt = prompt_series[frame_idx]
358
+
359
+ if args.seed_behavior == 'schedule' or use_parseq:
360
+ args.seed = int(keys.seed_schedule_series[frame_idx])
361
+
362
+ if anim_args.enable_checkpoint_scheduling:
363
+ args.checkpoint = keys.checkpoint_schedule_series[frame_idx]
364
+ else:
365
+ args.checkpoint = None
366
+
367
+ #SubSeed scheduling
368
+ if anim_args.enable_subseed_scheduling:
369
+ args.subseed = int(keys.subseed_schedule_series[frame_idx])
370
+ args.subseed_strength = float(keys.subseed_strength_schedule_series[frame_idx])
371
+
372
+ if use_parseq:
373
+ args.seed_enable_extras = True
374
+ args.subseed = int(keys.subseed_series[frame_idx])
375
+ args.subseed_strength = keys.subseed_strength_series[frame_idx]
376
+
377
+ prompt_to_print, *after_neg = args.prompt.strip().split("--neg")
378
+ prompt_to_print = prompt_to_print.strip()
379
+ after_neg = "".join(after_neg).strip()
380
+
381
+ print(f"\033[32mSeed: \033[0m{args.seed}")
382
+ print(f"\033[35mPrompt: \033[0m{prompt_to_print}")
383
+ if after_neg and after_neg.strip():
384
+ print(f"\033[91mNeg Prompt: \033[0m{after_neg}")
385
+ if not using_vid_init:
386
+ # print motion table to cli if anim mode = 2D or 3D
387
+ if anim_args.animation_mode in ['2D','3D']:
388
+ print_render_table(anim_args, keys, frame_idx)
389
+
390
+ # grab init image for current frame
391
+ elif using_vid_init:
392
+ init_frame = get_next_frame(args.outdir, anim_args.video_init_path, frame_idx, False)
393
+ print(f"Using video init frame {init_frame}")
394
+ args.init_image = init_frame
395
+ if anim_args.use_mask_video:
396
+ mask_vals['video_mask'] = get_mask_from_file(get_next_frame(args.outdir, anim_args.video_mask_path, frame_idx, True), args)
397
+
398
+ if args.use_mask:
399
+ args.mask_image = compose_mask_with_check(root, args, mask_seq, mask_vals, args.init_sample) if args.init_sample is not None else None # we need it only after the first frame anyway
400
+
401
+ # setting up some arguments for the looper
402
+ loop_args.imageStrength = loopSchedulesAndData.image_strength_schedule_series[frame_idx]
403
+ loop_args.blendFactorMax = loopSchedulesAndData.blendFactorMax_series[frame_idx]
404
+ loop_args.blendFactorSlope = loopSchedulesAndData.blendFactorSlope_series[frame_idx]
405
+ loop_args.tweeningFrameSchedule = loopSchedulesAndData.tweening_frames_schedule_series[frame_idx]
406
+ loop_args.colorCorrectionFactor = loopSchedulesAndData.color_correction_factor_series[frame_idx]
407
+ loop_args.use_looper = loopSchedulesAndData.use_looper
408
+ loop_args.imagesToKeyframe = loopSchedulesAndData.imagesToKeyframe
409
+
410
+ if scheduled_clipskip is not None:
411
+ opts.data["CLIP_stop_at_last_layers"] = scheduled_clipskip
412
+
413
+ if anim_args.animation_mode == '3D' and (cmd_opts.lowvram or cmd_opts.medvram):
414
+ depth_model.to('cpu')
415
+ devices.torch_gc()
416
+ lowvram.setup_for_low_vram(sd_model, cmd_opts.medvram)
417
+ sd_hijack.model_hijack.hijack(sd_model)
418
+
419
+ # sample the diffusion model
420
+ image = generate(args, anim_args, loop_args, controlnet_args, root, frame_idx, sampler_name=scheduled_sampler_name)
421
+ patience = 10
422
+
423
+ # intercept and override to grayscale
424
+ if anim_args.color_force_grayscale:
425
+ image = ImageOps.grayscale(image)
426
+ image = ImageOps.colorize(image, black ="black", white ="white")
427
+
428
+ # reroll blank frame
429
+ if not image.getbbox():
430
+ print("Blank frame detected! If you don't have the NSFW filter enabled, this may be due to a glitch!")
431
+ if args.reroll_blank_frames == 'reroll':
432
+ while not image.getbbox():
433
+ print("Rerolling with +1 seed...")
434
+ args.seed += 1
435
+ image = generate(args, anim_args, loop_args, controlnet_args, root, frame_idx, sampler_name=scheduled_sampler_name)
436
+ patience -= 1
437
+ if patience == 0:
438
+ print("Rerolling with +1 seed failed for 10 iterations! Try setting webui's precision to 'full' and if it fails, please report this to the devs! Interrupting...")
439
+ state.interrupted = True
440
+ state.current_image = image
441
+ return
442
+ elif args.reroll_blank_frames == 'interrupt':
443
+ print("Interrupting to save your eyes...")
444
+ state.interrupted = True
445
+ state.current_image = image
446
+ image = blank_frame_reroll(image, args, root, frame_idx)
447
+ if image is None:
448
+ return
449
+
450
+ opencv_image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
451
+ if not using_vid_init:
452
+ prev_img = opencv_image
453
+
454
+ if turbo_steps > 1:
455
+ turbo_prev_image, turbo_prev_frame_idx = turbo_next_image, turbo_next_frame_idx
456
+ turbo_next_image, turbo_next_frame_idx = opencv_image, frame_idx
457
+ frame_idx += turbo_steps
458
+ else:
459
+ filename = f"{args.timestring}_{frame_idx:05}.png"
460
+ save_image(image, 'PIL', filename, args, video_args, root)
461
+
462
+ if anim_args.save_depth_maps:
463
+ if cmd_opts.lowvram or cmd_opts.medvram:
464
+ lowvram.send_everything_to_cpu()
465
+ sd_hijack.model_hijack.undo_hijack(sd_model)
466
+ devices.torch_gc()
467
+ depth_model.to(root.device)
468
+ depth = depth_model.predict(opencv_image, anim_args, root.half_precision)
469
+ depth_model.save(os.path.join(args.outdir, f"{args.timestring}_depth_{frame_idx:05}.png"), depth)
470
+ if cmd_opts.lowvram or cmd_opts.medvram:
471
+ depth_model.to('cpu')
472
+ devices.torch_gc()
473
+ lowvram.setup_for_low_vram(sd_model, cmd_opts.medvram)
474
+ sd_hijack.model_hijack.hijack(sd_model)
475
+ frame_idx += 1
476
+
477
+ state.current_image = image
478
+
479
+ args.seed = next_seed(args)
480
+
481
+ def print_render_table(anim_args, keys, frame_idx):
482
+ from rich.table import Table
483
+ from rich import box
484
+ table = Table(padding=0, box=box.ROUNDED)
485
+ field_names = []
486
+ if anim_args.animation_mode == '2D':
487
+ short_zoom = round(keys.zoom_series[frame_idx], 6)
488
+ field_names += ["Angle", "Zoom"]
489
+ field_names += ["Tr X", "Tr Y"]
490
+ if anim_args.animation_mode == '3D':
491
+ field_names += ["Tr Z", "Ro X", "Ro Y", "Ro Z"]
492
+ if anim_args.enable_perspective_flip:
493
+ field_names += ["Pf T", "Pf P", "Pf G", "Pf F"]
494
+ for field_name in field_names:
495
+ table.add_column(field_name, justify="center")
496
+
497
+ rows = []
498
+ if anim_args.animation_mode == '2D':
499
+ rows += [str(keys.angle_series[frame_idx]),str(short_zoom)]
500
+ rows += [str(keys.translation_x_series[frame_idx]),str(keys.translation_y_series[frame_idx])]
501
+ if anim_args.animation_mode == '3D':
502
+ rows += [str(keys.translation_z_series[frame_idx]),str(keys.rotation_3d_x_series[frame_idx]),str(keys.rotation_3d_y_series[frame_idx]),str(keys.rotation_3d_z_series[frame_idx])]
503
+ if anim_args.enable_perspective_flip:
504
+ rows +=[str(keys.perspective_flip_theta_series[frame_idx]), str(keys.perspective_flip_phi_series[frame_idx]), str(keys.perspective_flip_gamma_series[frame_idx]), str(keys.perspective_flip_fv_series[frame_idx])]
505
+ table.add_row(*rows)
506
+
507
+ console.print(table)
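One detail worth spelling out: when diffusion_cadence > 1, the in-between ("tween") frames written above are a plain linear blend of the previous and next diffused frames. A minimal numeric sketch (array values are invented):

    import numpy as np

    turbo_prev_image = np.full((2, 2, 3), 100, dtype=np.float32)  # stand-in frame
    turbo_next_image = np.full((2, 2, 3), 200, dtype=np.float32)  # stand-in frame
    tween = 0.25  # a quarter of the way from prev to next
    img = turbo_prev_image * (1.0 - tween) + turbo_next_image * tween
    assert img[0, 0, 0] == 125.0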
extensions/deforum/scripts/deforum_helpers/render_modes.py ADDED
@@ -0,0 +1,154 @@
1
+ import os
2
+ import pathlib
3
+ import json
4
+ from .render import render_animation
5
+ from .seed import next_seed
6
+ from .video_audio_utilities import vid2frames
7
+ from .prompt import interpolate_prompts
8
+ from .generate import generate
9
+ from .animation_key_frames import DeformAnimKeys
10
+ from .parseq_adapter import ParseqAnimKeys
11
+ from .save_images import save_image
12
+ from .settings import get_keys_to_exclude
13
+
14
+ # Webui
15
+ from modules.shared import opts, cmd_opts, state
16
+
17
+ def render_input_video(args, anim_args, video_args, parseq_args, loop_args, controlnet_args, animation_prompts, root):
18
+ # create a folder for the video input frames to live in
19
+ video_in_frame_path = os.path.join(args.outdir, 'inputframes')
20
+ os.makedirs(video_in_frame_path, exist_ok=True)
21
+
22
+ # save the video frames from input video
23
+ print(f"Exporting Video Frames (1 every {anim_args.extract_nth_frame}) frames to {video_in_frame_path}...")
24
+ vid2frames(video_path = anim_args.video_init_path, video_in_frame_path=video_in_frame_path, n=anim_args.extract_nth_frame, overwrite=anim_args.overwrite_extracted_frames, extract_from_frame=anim_args.extract_from_frame, extract_to_frame=anim_args.extract_to_frame)
25
+
26
+ # determine max frames from length of input frames
27
+ anim_args.max_frames = len([f for f in pathlib.Path(video_in_frame_path).glob('*.jpg')])
28
+ args.use_init = True
29
+ print(f"Loading {anim_args.max_frames} input frames from {video_in_frame_path} and saving video frames to {args.outdir}")
30
+
31
+ if anim_args.use_mask_video:
32
+ # create a folder for the mask video input frames to live in
33
+ mask_in_frame_path = os.path.join(args.outdir, 'maskframes')
34
+ os.makedirs(mask_in_frame_path, exist_ok=True)
35
+
36
+ # save the video frames from mask video
37
+ print(f"Exporting Video Frames (1 every {anim_args.extract_nth_frame}) frames to {mask_in_frame_path}...")
38
+ vid2frames(video_path=anim_args.video_mask_path,video_in_frame_path=mask_in_frame_path, n=anim_args.extract_nth_frame, overwrite=anim_args.overwrite_extracted_frames, extract_from_frame=anim_args.extract_from_frame, extract_to_frame=anim_args.extract_to_frame)
39
+ max_mask_frames = len([f for f in pathlib.Path(mask_in_frame_path).glob('*.jpg')])
40
+
41
+ # limit max frames if the video mask has fewer frames than the input video
42
+ if max_mask_frames < anim_args.max_frames :
43
+ anim_args.max_frames = max_mask_frames
44
+ print("Video mask contains fewer frames than the init video; max frames limited to the number of mask frames.")
45
+ args.use_mask = True
46
+ args.overlay_mask = True
47
+
48
+
49
+ render_animation(args, anim_args, video_args, parseq_args, loop_args, controlnet_args, animation_prompts, root)
50
+
51
+ # Modified copy of the above that allows using a mask video without an init video.
52
+ def render_animation_with_video_mask(args, anim_args, video_args, parseq_args, loop_args, controlnet_args, animation_prompts, root):
53
+ # create a folder for the video input frames to live in
54
+ mask_in_frame_path = os.path.join(args.outdir, 'maskframes')
55
+ os.makedirs(mask_in_frame_path, exist_ok=True)
56
+
57
+ # save the video frames from mask video
58
+ print(f"Exporting Video Frames (1 every {anim_args.extract_nth_frame}) frames to {mask_in_frame_path}...")
59
+ vid2frames(video_path=anim_args.video_mask_path, video_in_frame_path=mask_in_frame_path, n=anim_args.extract_nth_frame, overwrite=anim_args.overwrite_extracted_frames, extract_from_frame=anim_args.extract_from_frame, extract_to_frame=anim_args.extract_to_frame)
60
+ args.use_mask = True
61
+ #args.overlay_mask = True
62
+
63
+ # determine max frames from length of input frames
64
+ anim_args.max_frames = len([f for f in pathlib.Path(mask_in_frame_path).glob('*.jpg')])
65
+ #args.use_init = True
66
+ print(f"Loading {anim_args.max_frames} input frames from {mask_in_frame_path} and saving video frames to {args.outdir}")
67
+
68
+ render_animation(args, anim_args, video_args, parseq_args, loop_args, controlnet_args, animation_prompts, root)
69
+
70
+
71
+ def render_interpolation(args, anim_args, video_args, parseq_args, loop_args, controlnet_args, animation_prompts, root):
72
+
73
+ # use parseq if manifest is provided
74
+ use_parseq = parseq_args.parseq_manifest is not None and parseq_args.parseq_manifest.strip()
75
+
76
+ # expand key frame strings to values
77
+ keys = DeformAnimKeys(anim_args) if not use_parseq else ParseqAnimKeys(parseq_args, anim_args)
78
+
79
+ # create output folder for the batch
80
+ os.makedirs(args.outdir, exist_ok=True)
81
+ print(f"Saving interpolation animation frames to {args.outdir}")
82
+
83
+ # save settings for the batch
84
+ exclude_keys = get_keys_to_exclude('general')
85
+ settings_filename = os.path.join(args.outdir, f"{args.timestring}_settings.txt")
86
+ with open(settings_filename, "w+", encoding="utf-8") as f:
87
+ s = {}
88
+ for d in [dict(args.__dict__), dict(anim_args.__dict__), dict(parseq_args.__dict__)]:
89
+ for key, value in d.items():
90
+ if key not in exclude_keys:
91
+ s[key] = value
92
+ json.dump(s, f, ensure_ascii=False, indent=4)
93
+
94
+ # Compute interpolated prompts
95
+ if use_parseq:
96
+ print("Parseq prompts are assumed to already be interpolated - not doing any additional prompt interpolation")
97
+ prompt_series = keys.prompts
98
+ else:
99
+ print("Generating interpolated prompts for all frames")
100
+ prompt_series = interpolate_prompts(animation_prompts, anim_args.max_frames)
101
+
102
+ state.job_count = anim_args.max_frames
103
+ frame_idx = 0
104
+ # INTERPOLATION MODE
105
+ while frame_idx < anim_args.max_frames:
106
+ # print data to cli
107
+ prompt_to_print = prompt_series[frame_idx].strip()
108
+ if prompt_to_print.endswith("--neg"):
109
+ prompt_to_print = prompt_to_print[:-5]
110
+ print(f"\033[36mInterpolation frame: \033[0m{frame_idx}/{anim_args.max_frames} ")
111
+ print(f"\033[32mSeed: \033[0m{args.seed}")
112
+ print(f"\033[35mPrompt: \033[0m{prompt_to_print}")
113
+
114
+ state.job = f"frame {frame_idx + 1}/{anim_args.max_frames}"
115
+ state.job_no = frame_idx + 1
116
+
117
+ if state.interrupted:
118
+ break
119
+
120
+ # grab inputs for current frame generation
121
+ args.n_samples = 1
122
+ args.prompt = prompt_series[frame_idx]
123
+ args.scale = keys.cfg_scale_schedule_series[frame_idx]
124
+ args.pix2pix_img_cfg_scale = keys.pix2pix_img_cfg_scale_series[frame_idx]
125
+
126
+ if anim_args.enable_checkpoint_scheduling:
127
+ args.checkpoint = keys.checkpoint_schedule_series[frame_idx]
128
+ print(f"Checkpoint changed to: {args.checkpoint}")
129
+ else:
130
+ args.checkpoint = None
131
+
132
+ if anim_args.enable_subseed_scheduling:
133
+ args.subseed = keys.subseed_schedule_series[frame_idx]
134
+ args.subseed_strength = keys.subseed_strength_schedule_series[frame_idx]
135
+
136
+ if use_parseq:
137
+ anim_args.enable_subseed_scheduling = True
138
+ args.subseed = int(keys.subseed_series[frame_idx])
139
+ args.subseed_strength = keys.subseed_strength_series[frame_idx]
140
+
141
+ if args.seed_behavior == 'schedule' or use_parseq:
142
+ args.seed = int(keys.seed_schedule_series[frame_idx])
143
+
144
+ image = generate(args, anim_args, loop_args, controlnet_args, root, frame_idx)
145
+ filename = f"{args.timestring}_{frame_idx:05}.png"
146
+
147
+ save_image(image, 'PIL', filename, args, video_args, root)
148
+
149
+ state.current_image = image
150
+
151
+ if args.seed_behavior != 'schedule':
152
+ args.seed = next_seed(args)
153
+
154
+ frame_idx += 1
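As a side note, the frame counting and mask cap in render_input_video reduce to comparing the number of extracted JPEGs in the two folders; a hedged sketch with hypothetical paths:

    import pathlib

    def count_jpgs(folder):
        return len(list(pathlib.Path(folder).glob('*.jpg')))

    max_frames = count_jpgs('outputs/inputframes')     # hypothetical output folders
    max_mask_frames = count_jpgs('outputs/maskframes')
    if max_mask_frames < max_frames:
        max_frames = max_mask_frames  # mirrors the cap applied in render_input_video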
extensions/deforum/scripts/deforum_helpers/rich.py ADDED
@@ -0,0 +1,2 @@
1
+ from rich.console import Console
2
+ console = Console()
extensions/deforum/scripts/deforum_helpers/save_images.py ADDED
@@ -0,0 +1,80 @@
1
+ from typing import List, Tuple
2
+ from einops import rearrange
3
+ import numpy as np, os, torch
4
+ from PIL import Image
5
+ from torchvision.utils import make_grid
6
+ import time
7
+
8
+
9
+ def get_output_folder(output_path, batch_folder):
10
+ out_path = os.path.join(output_path,time.strftime('%Y-%m'))
11
+ if batch_folder != "":
12
+ out_path = os.path.join(out_path, batch_folder)
13
+ os.makedirs(out_path, exist_ok=True)
14
+ return out_path
15
+
16
+
17
+ def save_samples(
18
+ args, x_samples: torch.Tensor, seed: int, n_rows: int
19
+ ) -> Tuple[Image.Image, List[Image.Image]]:
20
+ """Function to save samples to disk.
21
+ Args:
22
+ args: Stable deforum diffusion arguments.
23
+ x_samples: Samples to save.
24
+ seed: Seed for the experiment.
25
+ n_rows: Number of rows in the grid.
26
+ Returns:
27
+ A tuple of the grid image and a list of the generated images.
28
+ ( grid_image, generated_images )
29
+ """
30
+
31
+ # save samples
32
+ images = []
33
+ grid_image = None
34
+ if args.display_samples or args.save_samples:
35
+ for index, x_sample in enumerate(x_samples):
36
+ x_sample = 255.0 * rearrange(x_sample.cpu().numpy(), "c h w -> h w c")
37
+ images.append(Image.fromarray(x_sample.astype(np.uint8)))
38
+ if args.save_samples:
39
+ images[-1].save(
40
+ os.path.join(
41
+ args.outdir, f"{args.timestring}_{index:02}_{seed}.png"
42
+ )
43
+ )
44
+
45
+ # save grid
46
+ if args.display_grid or args.save_grid:
47
+ grid = torch.stack([x_samples], 0)
48
+ grid = rearrange(grid, "n b c h w -> (n b) c h w")
49
+ grid = make_grid(grid, nrow=n_rows, padding=0)
50
+
51
+ # to image
52
+ grid = 255.0 * rearrange(grid, "c h w -> h w c").cpu().numpy()
53
+ grid_image = Image.fromarray(grid.astype(np.uint8))
54
+ if args.save_grid:
55
+ grid_image.save(
56
+ os.path.join(args.outdir, f"{args.timestring}_{seed}_grid.png")
57
+ )
58
+
59
+ # return grid_image and individual sample images
60
+ return grid_image, images
61
+
62
+ def save_image(image, image_type, filename, args, video_args, root):
63
+ if video_args.store_frames_in_ram:
64
+ root.frames_cache.append({'path':os.path.join(args.outdir, filename), 'image':image, 'image_type':image_type})
65
+ else:
66
+ image.save(os.path.join(args.outdir, filename))
67
+
68
+ import cv2, gc
69
+
70
+ def reset_frames_cache(root):
71
+ root.frames_cache = []
72
+ gc.collect()
73
+
74
+ def dump_frames_cache(root):
75
+ for image_cache in root.frames_cache:
76
+ if image_cache['image_type'] == 'cv2':
77
+ cv2.imwrite(image_cache['path'], image_cache['image'])
78
+ elif image_cache['image_type'] == 'PIL':
79
+ image_cache['image'].save(image_cache['path'])
80
+ # do not reset the cache since we're going to add frame erasing later function #TODO
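A hedged sketch of how the in-RAM frame cache above is meant to be used (root, args and video_args are stand-in namespaces rather than the real webui objects; import path assumes the scripts folder is on sys.path):

    from types import SimpleNamespace
    from PIL import Image
    from deforum_helpers.save_images import save_image  # assumed import path

    root = SimpleNamespace(frames_cache=[])
    args = SimpleNamespace(outdir='outputs')                    # stand-in values
    video_args = SimpleNamespace(store_frames_in_ram=True)

    frame = Image.new('RGB', (64, 64), 'black')
    save_image(frame, 'PIL', '20230101000000_00000.png', args, video_args, root)  # only cached
    # after the run, flush everything to disk in one go (the output folder must exist):
    # dump_frames_cache(root)
    # reset_frames_cache(root)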
extensions/deforum/scripts/deforum_helpers/seed.py ADDED
@@ -0,0 +1,26 @@
1
+ import random
2
+
3
+ def next_seed(args):
4
+ if args.seed_behavior == 'iter':
5
+ if args.seed_internal % args.seed_iter_N == 0:
6
+ args.seed += 1
7
+ args.seed_internal += 1
8
+ elif args.seed_behavior == 'ladder':
9
+ if args.seed_internal == 0:
10
+ args.seed += 2
11
+ args.seed_internal = 1
12
+ else:
13
+ args.seed -= 1
14
+ args.seed_internal = 0
15
+ elif args.seed_behavior == 'alternate':
16
+ if args.seed_internal == 0:
17
+ args.seed += 1
18
+ args.seed_internal = 1
19
+ else:
20
+ args.seed -= 1
21
+ args.seed_internal = 0
22
+ elif args.seed_behavior == 'fixed':
23
+ pass # always keep seed the same
24
+ else:
25
+ args.seed = random.randint(0, 2**32 - 1)
26
+ return args.seed
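To make the behaviors above concrete, a hedged sketch of the first few seeds produced by 'ladder' and 'alternate' (args is a stand-in with only the fields next_seed reads; import path assumes the scripts folder is on sys.path):

    from types import SimpleNamespace
    from deforum_helpers.seed import next_seed  # assumed import path

    args = SimpleNamespace(seed=100, seed_behavior='ladder', seed_internal=0, seed_iter_N=1)
    print([next_seed(args) for _ in range(4)])   # [102, 101, 103, 102] - climbs two, drops one
    args = SimpleNamespace(seed=100, seed_behavior='alternate', seed_internal=0, seed_iter_N=1)
    print([next_seed(args) for _ in range(4)])   # [101, 100, 101, 100]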
extensions/deforum/scripts/deforum_helpers/settings.py ADDED
@@ -0,0 +1,272 @@
1
+ from math import ceil
2
+ import os
3
+ import json
4
+ import deforum_helpers.args as deforum_args
5
+ from .args import mask_fill_choices, DeforumArgs, DeforumAnimArgs
6
+ from .deprecation_utils import handle_deprecated_settings
7
+ import logging
8
+
9
+ def get_keys_to_exclude(setting_type):
10
+ if setting_type == 'general':
11
+ return ["n_batch", "restore_faces", "seed_enable_extras", "save_samples", "display_samples", "show_sample_per_step", "filename_format", "from_img2img_instead_of_link", "scale", "subseed", "subseed_strength", "C", "f", "init_latent", "init_sample", "init_c", "noise_mask", "seed_internal"]
12
+ else: #video
13
+ return ["mp4_path", "image_path", "output_format","render_steps","path_name_modifier"]
14
+
15
+ def load_args(args_dict,anim_args_dict, parseq_args_dict, loop_args_dict, controlnet_args_dict, custom_settings_file, root):
16
+ print(f"reading custom settings from {custom_settings_file}")
17
+ if not os.path.isfile(custom_settings_file):
18
+ print('The custom settings file does not exist. The in-notebook settings will be used instead')
19
+ else:
20
+ with open(custom_settings_file, "r") as f:
21
+ jdata = json.loads(f.read())
22
+ handle_deprecated_settings(jdata)
23
+ root.animation_prompts = jdata["prompts"]
24
+ if "animation_prompts_positive" in jdata:
25
+ root.animation_prompts_positive = jdata["animation_prompts_positive"]
26
+ if "animation_prompts_negative" in jdata:
27
+ root.animation_prompts_negative = jdata["animation_prompts_negative"]
28
+ for i, k in enumerate(args_dict):
29
+ if k in jdata:
30
+ args_dict[k] = jdata[k]
31
+ else:
32
+ print(f"key {k} doesn't exist in the custom settings data! using the default value of {args_dict[k]}")
33
+ for i, k in enumerate(anim_args_dict):
34
+ if k in jdata:
35
+ anim_args_dict[k] = jdata[k]
36
+ else:
37
+ print(f"key {k} doesn't exist in the custom settings data! using the default value of {anim_args_dict[k]}")
38
+ for i, k in enumerate(parseq_args_dict):
39
+ if k in jdata:
40
+ parseq_args_dict[k] = jdata[k]
41
+ else:
42
+ print(f"key {k} doesn't exist in the custom settings data! using the default value of {parseq_args_dict[k]}")
43
+ for i, k in enumerate(loop_args_dict):
44
+ if k in jdata:
45
+ loop_args_dict[k] = jdata[k]
46
+ else:
47
+ print(f"key {k} doesn't exist in the custom settings data! using the default value of {loop_args_dict[k]}")
48
+ print(args_dict)
49
+ print(anim_args_dict)
50
+ print(parseq_args_dict)
51
+ print(loop_args_dict)
52
+
53
+ # In gradio gui settings save/ load funs:
54
+ def save_settings(*args, **kwargs):
55
+ settings_path = args[0].strip()
56
+ data = {deforum_args.settings_component_names[i]: args[i+1] for i in range(0, len(deforum_args.settings_component_names))}
57
+ from deforum_helpers.args import pack_args, pack_anim_args, pack_parseq_args, pack_loop_args, pack_controlnet_args
58
+ args_dict = pack_args(data)
59
+ anim_args_dict = pack_anim_args(data)
60
+ parseq_dict = pack_parseq_args(data)
61
+ args_dict["prompts"] = json.loads(data['animation_prompts'])
62
+ args_dict["animation_prompts_positive"] = data['animation_prompts_positive']
63
+ args_dict["animation_prompts_negative"] = data['animation_prompts_negative']
64
+ loop_dict = pack_loop_args(data)
65
+ controlnet_dict = pack_controlnet_args(data)
66
+
67
+ combined = {**args_dict, **anim_args_dict, **parseq_dict, **loop_dict, **controlnet_dict}
68
+ exclude_keys = get_keys_to_exclude('general') + ['controlnet_input_video_chosen_file', 'controlnet_input_video_mask_chosen_file']
69
+ filtered_combined = {k: v for k, v in combined.items() if k not in exclude_keys}
70
+
71
+ print(f"saving custom settings to {settings_path}")
72
+ with open(settings_path, "w") as f:
73
+ f.write(json.dumps(filtered_combined, ensure_ascii=False, indent=4))
74
+
75
+ return [""]
76
+
77
+ def save_video_settings(*args, **kwargs):
78
+ video_settings_path = args[0].strip()
79
+ data = {deforum_args.video_args_names[i]: args[i+1] for i in range(0, len(deforum_args.video_args_names))}
80
+ from deforum_helpers.args import pack_video_args
81
+ video_args_dict = pack_video_args(data)
82
+ exclude_keys = get_keys_to_exclude('video')
83
+ filtered_data = video_args_dict if exclude_keys is None else {k: v for k, v in video_args_dict.items() if k not in exclude_keys}
84
+ print(f"saving video settings to {video_settings_path}")
85
+ with open(video_settings_path, "w") as f:
86
+ f.write(json.dumps(filtered_data, ensure_ascii=False, indent=4))
87
+ return [""]
88
+
89
+ def load_settings(*args, **kwargs):
90
+ settings_path = args[0].strip()
91
+ data = {deforum_args.settings_component_names[i]: args[i+1] for i in range(0, len(deforum_args.settings_component_names))}
92
+ print(f"reading custom settings from {settings_path}")
93
+ jdata = {}
94
+ if not os.path.isfile(settings_path):
95
+ print('The custom settings file does not exist. The values will be unchanged.')
96
+ return [data[name] for name in deforum_args.settings_component_names] + [""]
97
+ else:
98
+ with open(settings_path, "r") as f:
99
+ jdata = json.loads(f.read())
100
+ handle_deprecated_settings(jdata)
101
+ ret = []
102
+ if 'animation_prompts' in jdata:
103
+ jdata['prompts'] = jdata['animation_prompts']#compatibility with old versions
104
+ if 'animation_prompts_positive' in jdata:
105
+ data["animation_prompts_positive"] = jdata['animation_prompts_positive']
106
+ if 'animation_prompts_negative' in jdata:
107
+ data["animation_prompts_negative"] = jdata['animation_prompts_negative']
108
+ for key in data:
109
+ if key == 'sampler':
110
+ sampler_val = jdata[key]
111
+ if type(sampler_val) == int:
112
+ from modules.sd_samplers import samplers_for_img2img
113
+ ret.append(samplers_for_img2img[sampler_val].name)
114
+ else:
115
+ ret.append(sampler_val)
116
+
117
+ elif key == 'fill':
118
+ if key in jdata:
119
+ fill_val = jdata[key]
120
+ if type(fill_val) == int:
121
+ ret.append(mask_fill_choices[fill_val])
122
+ else:
123
+ ret.append(fill_val)
124
+ else:
125
+ fill_default = DeforumArgs()['fill']
126
+ logging.debug(f"Fill not found in load file, using default value: {fill_default}")
127
+ ret.append(mask_fill_choices[fill_default])
128
+
129
+ elif key == 'reroll_blank_frames':
130
+ if key in jdata:
131
+ reroll_blank_frames_val = jdata[key]
132
+ ret.append(reroll_blank_frames_val)
133
+ else:
134
+ reroll_blank_frames_default = DeforumArgs()['reroll_blank_frames']
135
+ logging.debug(f"Reroll blank frames not found in load file, using default value: {reroll_blank_frames_default}")
136
+ ret.append(reroll_blank_frames_default)
137
+
138
+ elif key == 'noise_type':
139
+ if key in jdata:
140
+ noise_type_val = jdata[key]
141
+ ret.append(noise_type_val)
142
+ else:
143
+ noise_type_default = DeforumAnimArgs()['noise_type']
144
+ logging.debug(f"Noise type not found in load file, using default value: {noise_type_default}")
145
+ ret.append(noise_type_default)
146
+
147
+ elif key in jdata:
148
+ ret.append(jdata[key])
149
+ else:
150
+ if key == 'animation_prompts':
151
+ ret.append(json.dumps(jdata['prompts'], ensure_ascii=False, indent=4))
152
+ elif key == 'animation_prompts_positive' and 'animation_prompts_positive' in jdata:
153
+ ret.append(jdata['animation_prompts_positive'])
154
+ elif key == 'animation_prompts_negative' and 'animation_prompts_negative' in jdata:
155
+ ret.append(jdata['animation_prompts_negative'])
156
+ else:
157
+ ret.append(data[key])
158
+
159
+ #stuff
160
+ ret.append("")
161
+
162
+ return ret
163
+
164
+ def load_video_settings(*args, **kwargs):
165
+ video_settings_path = args[0].strip()
166
+ data = {deforum_args.video_args_names[i]: args[i+1] for i in range(0, len(deforum_args.video_args_names))}
167
+ print(f"reading custom video settings from {video_settings_path}")
168
+ jdata = {}
169
+ if not os.path.isfile(video_settings_path):
170
+ print('The custom video settings file does not exist. The values will be unchanged.')
171
+ return [data[name] for name in deforum_args.video_args_names] + [""]
172
+ else:
173
+ with open(video_settings_path, "r") as f:
174
+ jdata = json.loads(f.read())
175
+ handle_deprecated_settings(jdata)
176
+ ret = []
177
+
178
+ for key in data:
179
+ if key == 'add_soundtrack':
180
+ add_soundtrack_val = jdata[key]
181
+ if type(add_soundtrack_val) == bool:
182
+ ret.append('File' if add_soundtrack_val else 'None')
183
+ else:
184
+ ret.append(add_soundtrack_val)
185
+ elif key in jdata:
186
+ ret.append(jdata[key])
187
+ else:
188
+ ret.append(data[key])
189
+
190
+ #stuff
191
+ ret.append("")
192
+
193
+ return ret
194
+
195
+ import tqdm
196
+ from modules.shared import state, progress_print_out, opts, cmd_opts
197
+ class DeforumTQDM:
198
+ def __init__(self, args, anim_args, parseq_args):
199
+ self._tqdm = None
200
+ self._args = args
201
+ self._anim_args = anim_args
202
+ self._parseq_args = parseq_args
203
+
204
+ def reset(self):
205
+ from .animation_key_frames import DeformAnimKeys
206
+ from .parseq_adapter import ParseqAnimKeys
207
+ deforum_total = 0
208
+ # FIXME: get only amount of steps
209
+ use_parseq = self._parseq_args.parseq_manifest is not None and self._parseq_args.parseq_manifest.strip()
210
+ keys = DeformAnimKeys(self._anim_args) if not use_parseq else ParseqAnimKeys(self._parseq_args, self._anim_args)
211
+
212
+ start_frame = 0
213
+ if self._anim_args.resume_from_timestring:
214
+ for tmp in os.listdir(self._args.outdir):
215
+ filename = tmp.split("_")
216
+ # don't use saved depth maps to count number of frames
217
+ if self._anim_args.resume_timestring in filename and "depth" not in filename:
218
+ start_frame += 1
219
+ start_frame = start_frame - 1
220
+ using_vid_init = self._anim_args.animation_mode == 'Video Input'
221
+ turbo_steps = 1 if using_vid_init else int(self._anim_args.diffusion_cadence)
222
+ if self._anim_args.resume_from_timestring:
223
+ last_frame = start_frame-1
224
+ if turbo_steps > 1:
225
+ last_frame -= last_frame%turbo_steps
226
+ if turbo_steps > 1:
227
+ turbo_next_frame_idx = last_frame
228
+ turbo_prev_frame_idx = turbo_next_frame_idx
229
+ start_frame = last_frame+turbo_steps
230
+ frame_idx = start_frame
231
+ had_first = False
232
+ while frame_idx < self._anim_args.max_frames:
233
+ strength = keys.strength_schedule_series[frame_idx]
234
+ if not had_first and self._args.use_init and self._args.init_image is not None and self._args.init_image != '':
235
+ deforum_total += int(ceil(self._args.steps * (1-strength)))
236
+ had_first = True
237
+ elif not had_first:
238
+ deforum_total += self._args.steps
239
+ had_first = True
240
+ else:
241
+ deforum_total += int(ceil(self._args.steps * (1-strength)))
242
+
243
+ if turbo_steps > 1:
244
+ frame_idx += turbo_steps
245
+ else:
246
+ frame_idx += 1
247
+
248
+ self._tqdm = tqdm.tqdm(
249
+ desc="Deforum progress",
250
+ total=deforum_total,
251
+ position=1,
252
+ file=progress_print_out
253
+ )
254
+
255
+ def update(self):
256
+ if not opts.multiple_tqdm or cmd_opts.disable_console_progressbars:
257
+ return
258
+ if self._tqdm is None:
259
+ self.reset()
260
+ self._tqdm.update()
261
+
262
+ def updateTotal(self, new_total):
263
+ if not opts.multiple_tqdm or cmd_opts.disable_console_progressbars:
264
+ return
265
+ if self._tqdm is None:
266
+ self.reset()
267
+ self._tqdm.total=new_total
268
+
269
+ def clear(self):
270
+ if self._tqdm is not None:
271
+ self._tqdm.close()
272
+ self._tqdm = None
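The progress-bar total computed in reset() is essentially the img2img step count summed over the rendered frames: the first frame contributes the full step count (unless an init image is used), and every later frame contributes ceil(steps * (1 - strength)). A hedged arithmetic sketch with invented numbers:

    from math import ceil

    steps = 20
    strengths = [0.0, 0.65, 0.65, 0.65]    # strength_schedule value per rendered frame
    total = steps                          # first frame runs all steps
    total += sum(int(ceil(steps * (1 - s))) for s in strengths[1:])
    print(total)                           # 20 + 3 * 7 = 41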
extensions/deforum/scripts/deforum_helpers/src/adabins/__init__.py ADDED
@@ -0,0 +1 @@
1
+ from .unet_adaptive_bins import UnetAdaptiveBins
extensions/deforum/scripts/deforum_helpers/src/adabins/layers.py ADDED
@@ -0,0 +1,36 @@
1
+ import torch
2
+ import torch.nn as nn
3
+
4
+
5
+ class PatchTransformerEncoder(nn.Module):
6
+ def __init__(self, in_channels, patch_size=10, embedding_dim=128, num_heads=4):
7
+ super(PatchTransformerEncoder, self).__init__()
8
+ encoder_layers = nn.TransformerEncoderLayer(embedding_dim, num_heads, dim_feedforward=1024)
9
+ self.transformer_encoder = nn.TransformerEncoder(encoder_layers, num_layers=4) # takes shape S,N,E
10
+
11
+ self.embedding_convPxP = nn.Conv2d(in_channels, embedding_dim,
12
+ kernel_size=patch_size, stride=patch_size, padding=0)
13
+
14
+ self.positional_encodings = nn.Parameter(torch.rand(500, embedding_dim), requires_grad=True)
15
+
16
+ def forward(self, x):
17
+ embeddings = self.embedding_convPxP(x).flatten(2) # .shape = n,c,s = n, embedding_dim, s
18
+ # embeddings = nn.functional.pad(embeddings, (1,0)) # extra special token at start ?
19
+ embeddings = embeddings + self.positional_encodings[:embeddings.shape[2], :].T.unsqueeze(0)
20
+
21
+ # change to S,N,E format required by transformer
22
+ embeddings = embeddings.permute(2, 0, 1)
23
+ x = self.transformer_encoder(embeddings) # .shape = S, N, E
24
+ return x
25
+
26
+
27
+ class PixelWiseDotProduct(nn.Module):
28
+ def __init__(self):
29
+ super(PixelWiseDotProduct, self).__init__()
30
+
31
+ def forward(self, x, K):
32
+ n, c, h, w = x.size()
33
+ _, cout, ck = K.size()
34
+ assert c == ck, "Number of channels in x and Embedding dimension (at dim 2) of K matrix must match"
35
+ y = torch.matmul(x.view(n, c, h * w).permute(0, 2, 1), K.permute(0, 2, 1)) # .shape = n, hw, cout
36
+ return y.permute(0, 2, 1).view(n, cout, h, w)
extensions/deforum/scripts/deforum_helpers/src/adabins/miniViT.py ADDED
@@ -0,0 +1,45 @@
1
+ import torch
2
+ import torch.nn as nn
3
+
4
+ from .layers import PatchTransformerEncoder, PixelWiseDotProduct
5
+
6
+
7
+ class mViT(nn.Module):
8
+ def __init__(self, in_channels, n_query_channels=128, patch_size=16, dim_out=256,
9
+ embedding_dim=128, num_heads=4, norm='linear'):
10
+ super(mViT, self).__init__()
11
+ self.norm = norm
12
+ self.n_query_channels = n_query_channels
13
+ self.patch_transformer = PatchTransformerEncoder(in_channels, patch_size, embedding_dim, num_heads)
14
+ self.dot_product_layer = PixelWiseDotProduct()
15
+
16
+ self.conv3x3 = nn.Conv2d(in_channels, embedding_dim, kernel_size=3, stride=1, padding=1)
17
+ self.regressor = nn.Sequential(nn.Linear(embedding_dim, 256),
18
+ nn.LeakyReLU(),
19
+ nn.Linear(256, 256),
20
+ nn.LeakyReLU(),
21
+ nn.Linear(256, dim_out))
22
+
23
+ def forward(self, x):
24
+ # n, c, h, w = x.size()
25
+ tgt = self.patch_transformer(x.clone()) # .shape = S, N, E
26
+
27
+ x = self.conv3x3(x)
28
+
29
+ regression_head, queries = tgt[0, ...], tgt[1:self.n_query_channels + 1, ...]
30
+
31
+ # Change from S, N, E to N, S, E
32
+ queries = queries.permute(1, 0, 2)
33
+ range_attention_maps = self.dot_product_layer(x, queries) # .shape = n, n_query_channels, h, w
34
+
35
+ y = self.regressor(regression_head) # .shape = N, dim_out
36
+ if self.norm == 'linear':
37
+ y = torch.relu(y)
38
+ eps = 0.1
39
+ y = y + eps
40
+ elif self.norm == 'softmax':
41
+ return torch.softmax(y, dim=1), range_attention_maps
42
+ else:
43
+ y = torch.sigmoid(y)
44
+ y = y / y.sum(dim=1, keepdim=True)
45
+ return y, range_attention_maps
extensions/deforum/scripts/deforum_helpers/src/adabins/unet_adaptive_bins.py ADDED
@@ -0,0 +1,154 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ import torch.nn.functional as F
4
+ import os
5
+ from pathlib import Path
6
+
7
+ from .miniViT import mViT
8
+
9
+
10
+ class UpSampleBN(nn.Module):
11
+ def __init__(self, skip_input, output_features):
12
+ super(UpSampleBN, self).__init__()
13
+
14
+ self._net = nn.Sequential(nn.Conv2d(skip_input, output_features, kernel_size=3, stride=1, padding=1),
15
+ nn.BatchNorm2d(output_features),
16
+ nn.LeakyReLU(),
17
+ nn.Conv2d(output_features, output_features, kernel_size=3, stride=1, padding=1),
18
+ nn.BatchNorm2d(output_features),
19
+ nn.LeakyReLU())
20
+
21
+ def forward(self, x, concat_with):
22
+ up_x = F.interpolate(x, size=[concat_with.size(2), concat_with.size(3)], mode='bilinear', align_corners=True)
23
+ f = torch.cat([up_x, concat_with], dim=1)
24
+ return self._net(f)
25
+
26
+
27
+ class DecoderBN(nn.Module):
28
+ def __init__(self, num_features=2048, num_classes=1, bottleneck_features=2048):
29
+ super(DecoderBN, self).__init__()
30
+ features = int(num_features)
31
+
32
+ self.conv2 = nn.Conv2d(bottleneck_features, features, kernel_size=1, stride=1, padding=1)
33
+
34
+ self.up1 = UpSampleBN(skip_input=features // 1 + 112 + 64, output_features=features // 2)
35
+ self.up2 = UpSampleBN(skip_input=features // 2 + 40 + 24, output_features=features // 4)
36
+ self.up3 = UpSampleBN(skip_input=features // 4 + 24 + 16, output_features=features // 8)
37
+ self.up4 = UpSampleBN(skip_input=features // 8 + 16 + 8, output_features=features // 16)
38
+
39
+ # self.up5 = UpSample(skip_input=features // 16 + 3, output_features=features//16)
40
+ self.conv3 = nn.Conv2d(features // 16, num_classes, kernel_size=3, stride=1, padding=1)
41
+ # self.act_out = nn.Softmax(dim=1) if output_activation == 'softmax' else nn.Identity()
42
+
43
+ def forward(self, features):
44
+ x_block0, x_block1, x_block2, x_block3, x_block4 = features[4], features[5], features[6], features[8], features[
45
+ 11]
46
+
47
+ x_d0 = self.conv2(x_block4)
48
+
49
+ x_d1 = self.up1(x_d0, x_block3)
50
+ x_d2 = self.up2(x_d1, x_block2)
51
+ x_d3 = self.up3(x_d2, x_block1)
52
+ x_d4 = self.up4(x_d3, x_block0)
53
+ # x_d5 = self.up5(x_d4, features[0])
54
+ out = self.conv3(x_d4)
55
+ # out = self.act_out(out)
56
+ # if with_features:
57
+ # return out, features[-1]
58
+ # elif with_intermediate:
59
+ # return out, [x_block0, x_block1, x_block2, x_block3, x_block4, x_d1, x_d2, x_d3, x_d4]
60
+ return out
61
+
62
+
63
+ class Encoder(nn.Module):
64
+ def __init__(self, backend):
65
+ super(Encoder, self).__init__()
66
+ self.original_model = backend
67
+
68
+ def forward(self, x):
69
+ features = [x]
70
+ for k, v in self.original_model._modules.items():
71
+ if (k == 'blocks'):
72
+ for ki, vi in v._modules.items():
73
+ features.append(vi(features[-1]))
74
+ else:
75
+ features.append(v(features[-1]))
76
+ return features
77
+
78
+
79
+ class UnetAdaptiveBins(nn.Module):
80
+ def __init__(self, backend, n_bins=100, min_val=0.1, max_val=10, norm='linear'):
81
+ super(UnetAdaptiveBins, self).__init__()
82
+ self.num_classes = n_bins
83
+ self.min_val = min_val
84
+ self.max_val = max_val
85
+ self.encoder = Encoder(backend)
86
+ self.adaptive_bins_layer = mViT(128, n_query_channels=128, patch_size=16,
87
+ dim_out=n_bins,
88
+ embedding_dim=128, norm=norm)
89
+
90
+ self.decoder = DecoderBN(num_classes=128)
91
+ self.conv_out = nn.Sequential(nn.Conv2d(128, n_bins, kernel_size=1, stride=1, padding=0),
92
+ nn.Softmax(dim=1))
93
+
94
+ def forward(self, x, **kwargs):
95
+ unet_out = self.decoder(self.encoder(x), **kwargs)
96
+ bin_widths_normed, range_attention_maps = self.adaptive_bins_layer(unet_out)
97
+ out = self.conv_out(range_attention_maps)
98
+
99
+ # Post process
100
+ # n, c, h, w = out.shape
101
+ # hist = torch.sum(out.view(n, c, h * w), dim=2) / (h * w) # not used for training
102
+
103
+ bin_widths = (self.max_val - self.min_val) * bin_widths_normed # .shape = N, dim_out
104
+ bin_widths = nn.functional.pad(bin_widths, (1, 0), mode='constant', value=self.min_val)
105
+ bin_edges = torch.cumsum(bin_widths, dim=1)
106
+
107
+ centers = 0.5 * (bin_edges[:, :-1] + bin_edges[:, 1:])
108
+ n, dout = centers.size()
109
+ centers = centers.view(n, dout, 1, 1)
110
+
111
+ pred = torch.sum(out * centers, dim=1, keepdim=True)
112
+
113
+ return bin_edges, pred
114
+
115
+ def get_1x_lr_params(self): # lr/10 learning rate
116
+ return self.encoder.parameters()
117
+
118
+ def get_10x_lr_params(self): # lr learning rate
119
+ modules = [self.decoder, self.adaptive_bins_layer, self.conv_out]
120
+ for m in modules:
121
+ yield from m.parameters()
122
+
123
+ @classmethod
124
+ def build(cls, n_bins, **kwargs):
125
+ basemodel_name = 'tf_efficientnet_b5_ap'
126
+
127
+ print('Loading base model {}...'.format(basemodel_name), end='')
128
+ predicted_torch_model_cache_path = str(Path.home() / '.cache' / 'torch' / 'hub' / 'rwightman_gen-efficientnet-pytorch_master')
129
+ predicted_gep_cache_testfile = Path(predicted_torch_model_cache_path) / 'hubconf.py'
130
+ #print(f"predicted_gep_cache_testfile: {predicted_gep_cache_testfile}")
131
+ # try to fetch the model from the local cache first, and only download it from the internet if it can't be found (to enable offline usage)
132
+ if os.path.isfile(predicted_gep_cache_testfile):
133
+ basemodel = torch.hub.load(predicted_torch_model_cache_path, basemodel_name, pretrained=True, source='local')
134
+ else:
135
+ basemodel = torch.hub.load('rwightman/gen-efficientnet-pytorch', basemodel_name, pretrained=True)
136
+ print('Done.')
137
+
138
+ # Remove last layer
139
+ print('Removing last two layers (global_pool & classifier).')
140
+ basemodel.global_pool = nn.Identity()
141
+ basemodel.classifier = nn.Identity()
142
+
143
+ # Building Encoder-Decoder model
144
+ print('Building Encoder-Decoder model..', end='')
145
+ m = cls(basemodel, n_bins=n_bins, **kwargs)
146
+ print('Done.')
147
+ return m
148
+
149
+
150
+ if __name__ == '__main__':
151
+ model = UnetAdaptiveBins.build(100)
152
+ x = torch.rand(2, 3, 480, 640)
153
+ bins, pred = model(x)
154
+ print(bins.shape, pred.shape)
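Because the decoder output is typically below the input resolution, the predicted depth map is usually upsampled back to the input size before use. A minimal inference sketch follows (hypothetical usage, mirroring the `__main__` example above; the "metric depth within `[min_val, max_val]`" interpretation is the AdaBins convention):

```python
import torch
import torch.nn.functional as F

model = UnetAdaptiveBins.build(n_bins=100, min_val=0.1, max_val=10)
model.eval()

x = torch.rand(1, 3, 480, 640)               # normalized RGB input
with torch.no_grad():
    bin_edges, pred = model(x)               # pred: N x 1 x h x w depth map (metric, within [min_val, max_val])

# resize the prediction back to the input resolution
depth = F.interpolate(pred, size=x.shape[-2:], mode='bilinear', align_corners=True)
print(depth.shape)                           # torch.Size([1, 1, 480, 640])
```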
extensions/deforum/scripts/deforum_helpers/src/clipseg/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ This license does not apply to the model weights.
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
extensions/deforum/scripts/deforum_helpers/src/clipseg/Quickstart.ipynb ADDED
@@ -0,0 +1,107 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": null,
6
+ "metadata": {},
7
+ "outputs": [],
8
+ "source": [
9
+ "import torch\n",
10
+ "import requests\n",
11
+ "\n",
12
+ "! wget https://owncloud.gwdg.de/index.php/s/ioHbRzFx6th32hn/download -O weights.zip\n",
13
+ "! unzip -d weights -j weights.zip\n",
14
+ "from models.clipseg import CLIPDensePredT\n",
15
+ "from PIL import Image\n",
16
+ "from torchvision import transforms\n",
17
+ "from matplotlib import pyplot as plt\n",
18
+ "\n",
19
+ "# load model\n",
20
+ "model = CLIPDensePredT(version='ViT-B/16', reduce_dim=64)\n",
21
+ "model.eval();\n",
22
+ "\n",
23
+ "# non-strict, because we only stored decoder weights (not CLIP weights)\n",
24
+ "model.load_state_dict(torch.load('weights/rd64-uni.pth', map_location=torch.device('cpu')), strict=False);"
25
+ ]
26
+ },
27
+ {
28
+ "cell_type": "markdown",
29
+ "metadata": {},
30
+ "source": [
31
+ "Load and normalize `example_image.jpg`. You can also load through an URL."
32
+ ]
33
+ },
34
+ {
35
+ "cell_type": "code",
36
+ "execution_count": null,
37
+ "metadata": {},
38
+ "outputs": [],
39
+ "source": [
40
+ "# load and normalize image\n",
41
+ "input_image = Image.open('example_image.jpg')\n",
42
+ "\n",
43
+ "# or load from URL...\n",
44
+ "# image_url = 'https://farm5.staticflickr.com/4141/4856248695_03475782dc_z.jpg'\n",
45
+ "# input_image = Image.open(requests.get(image_url, stream=True).raw)\n",
46
+ "\n",
47
+ "transform = transforms.Compose([\n",
48
+ " transforms.ToTensor(),\n",
49
+ " transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n",
50
+ " transforms.Resize((352, 352)),\n",
51
+ "])\n",
52
+ "img = transform(input_image).unsqueeze(0)"
53
+ ]
54
+ },
55
+ {
56
+ "cell_type": "markdown",
57
+ "metadata": {},
58
+ "source": [
59
+ "Predict and visualize (this might take a few seconds if running without GPU support)"
60
+ ]
61
+ },
62
+ {
63
+ "cell_type": "code",
64
+ "execution_count": null,
65
+ "metadata": {},
66
+ "outputs": [],
67
+ "source": [
68
+ "prompts = ['a glass', 'something to fill', 'wood', 'a jar']\n",
69
+ "\n",
70
+ "# predict\n",
71
+ "with torch.no_grad():\n",
72
+ " preds = model(img.repeat(4,1,1,1), prompts)[0]\n",
73
+ "\n",
74
+ "# visualize prediction\n",
75
+ "_, ax = plt.subplots(1, 5, figsize=(15, 4))\n",
76
+ "[a.axis('off') for a in ax.flatten()]\n",
77
+ "ax[0].imshow(input_image)\n",
78
+ "[ax[i+1].imshow(torch.sigmoid(preds[i][0])) for i in range(4)];\n",
79
+ "[ax[i+1].text(0, -15, prompts[i]) for i in range(4)];"
80
+ ]
81
+ }
82
+ ],
83
+ "metadata": {
84
+ "interpreter": {
85
+ "hash": "800ed241f7db2bd3aa6942aa3be6809cdb30ee6b0a9e773dfecfa9fef1f4c586"
86
+ },
87
+ "kernelspec": {
88
+ "display_name": "Python 3",
89
+ "language": "python",
90
+ "name": "python3"
91
+ },
92
+ "language_info": {
93
+ "codemirror_mode": {
94
+ "name": "ipython",
95
+ "version": 3
96
+ },
97
+ "file_extension": ".py",
98
+ "mimetype": "text/x-python",
99
+ "name": "python",
100
+ "nbconvert_exporter": "python",
101
+ "pygments_lexer": "ipython3",
102
+ "version": "3.8.10"
103
+ }
104
+ },
105
+ "nbformat": 4,
106
+ "nbformat_minor": 4
107
+ }
extensions/deforum/scripts/deforum_helpers/src/clipseg/Readme.md ADDED
@@ -0,0 +1,84 @@
1
+ # Image Segmentation Using Text and Image Prompts
2
+ This repository contains the code used in the paper ["Image Segmentation Using Text and Image Prompts"](https://arxiv.org/abs/2112.10003).
3
+
4
+ **The Paper has been accepted to CVPR 2022!**
5
+
6
+ <img src="overview.png" alt="drawing" height="200em"/>
7
+
8
+ The system allows you to create segmentation models without training, based on:
9
+ - An arbitrary text query
10
+ - Or an image with a mask highlighting stuff or an object.
11
+
12
+ ### Quick Start
13
+
14
+ In the `Quickstart.ipynb` notebook we provide the code for using a pre-trained CLIPSeg model. If you run the notebook locally, make sure you have downloaded the `rd64-uni.pth` weights, either manually or via the git lfs extension.
15
+ It can also be used interactively using [MyBinder](https://mybinder.org/v2/gh/timojl/clipseg/HEAD?labpath=Quickstart.ipynb)
16
+ (please note that the VM does not use a GPU, thus inference takes a few seconds).
17
+
18
+
19
+ ### Dependencies
20
+ This code base depends on pytorch, torchvision and clip (`pip install git+https://github.com/openai/CLIP.git`).
21
+ Additional dependencies are hidden for double blind review.
22
+
23
+
24
+ ### Datasets
25
+
26
+ * `PhraseCut` and `PhraseCutPlus`: Referring expression dataset
27
+ * `PFEPascalWrapper`: Wrapper class for PFENet's Pascal-5i implementation
28
+ * `PascalZeroShot`: Wrapper class for PascalZeroShot
29
+ * `COCOWrapper`: Wrapper class for COCO.
30
+
31
+ ### Models
32
+
33
+ * `CLIPDensePredT`: CLIPSeg model with transformer-based decoder.
34
+ * `ViTDensePredT`: CLIPSeg model with transformer-based decoder.
35
+
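For orientation, here is a minimal text-prompt usage sketch for `CLIPDensePredT`, condensed from the `Quickstart.ipynb` notebook shipped in this commit (the weight path, image file and prompts are illustrative):

```python
import torch
from PIL import Image
from torchvision import transforms
from models.clipseg import CLIPDensePredT

# load the model; non-strict because only decoder weights are stored (not CLIP weights)
model = CLIPDensePredT(version='ViT-B/16', reduce_dim=64)
model.eval()
model.load_state_dict(torch.load('weights/rd64-uni.pth', map_location=torch.device('cpu')), strict=False)

# load and normalize an image
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    transforms.Resize((352, 352)),
])
img = transform(Image.open('example_image.jpg')).unsqueeze(0)

# one forward pass per prompt (the image batch is repeated to match the prompt count)
prompts = ['a glass', 'wood']
with torch.no_grad():
    preds = model(img.repeat(len(prompts), 1, 1, 1), prompts)[0]

masks = torch.sigmoid(preds)  # one soft segmentation heatmap per prompt
```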
36
+ ### Third Party Dependencies
37
+ For some of the datasets, third-party dependencies are required. Run the following commands in the `third_party` folder.
38
+ ```bash
39
+ git clone https://github.com/cvlab-yonsei/JoEm
40
+ git clone https://github.com/Jia-Research-Lab/PFENet.git
41
+ git clone https://github.com/ChenyunWu/PhraseCutDataset.git
42
+ git clone https://github.com/juhongm999/hsnet.git
43
+ ```
44
+
45
+ ### Weights
46
+
47
+ The MIT license does not apply to these weights.
48
+
49
+ We provide two model weights, for D=64 (4.1MB) and D=16 (1.1MB).
50
+ ```
51
+ wget https://owncloud.gwdg.de/index.php/s/ioHbRzFx6th32hn/download -O weights.zip
52
+ unzip -d weights -j weights.zip
53
+ ```
54
+
55
+
56
+ ### Training and Evaluation
57
+
58
+ To train, use the `training.py` script with an experiment file and an experiment id as parameters. E.g. `python training.py phrasecut.yaml 0` will train the first phrasecut experiment, which is defined by `configuration` and the first entry of `individual_configurations`. Model weights will be written to `logs/`.
59
+
60
+ For evaluation, use `score.py`. E.g. `python score.py phrasecut.yaml 0 0` will evaluate the first phrasecut experiment using `test_configuration` and the first configuration in `individual_configurations`.
61
+
62
+
63
+ ### Usage of PFENet Wrappers
64
+
65
+ In order to use the dataset and model wrappers for PFENet, the PFENet repository needs to be cloned to the root folder.
66
+ `git clone https://github.com/Jia-Research-Lab/PFENet.git `
67
+
68
+
69
+ ### License
70
+
71
+ The source code files in this repository (excluding model weights) are released under the MIT license.
72
+
73
+ ### Citation
74
+ ```
75
+ @InProceedings{lueddecke22_cvpr,
76
+ author = {L\"uddecke, Timo and Ecker, Alexander},
77
+ title = {Image Segmentation Using Text and Image Prompts},
78
+ booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
79
+ month = {June},
80
+ year = {2022},
81
+ pages = {7086-7096}
82
+ }
83
+
84
+ ```
extensions/deforum/scripts/deforum_helpers/src/clipseg/Tables.ipynb ADDED
@@ -0,0 +1,349 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": null,
6
+ "metadata": {},
7
+ "outputs": [],
8
+ "source": [
9
+ "%load_ext autoreload\n",
10
+ "%autoreload 2\n",
11
+ "\n",
12
+ "import clip\n",
13
+ "from evaluation_utils import norm, denorm\n",
14
+ "from general_utils import *\n",
15
+ "from datasets.lvis_oneshot3 import LVIS_OneShot3, LVIS_OneShot"
16
+ ]
17
+ },
18
+ {
19
+ "cell_type": "markdown",
20
+ "metadata": {},
21
+ "source": [
22
+ "# PhraseCut"
23
+ ]
24
+ },
25
+ {
26
+ "cell_type": "code",
27
+ "execution_count": null,
28
+ "metadata": {},
29
+ "outputs": [],
30
+ "source": [
31
+ "pc = experiment('experiments/phrasecut.yaml', nums=':6').dataframe()"
32
+ ]
33
+ },
34
+ {
35
+ "cell_type": "code",
36
+ "execution_count": null,
37
+ "metadata": {},
38
+ "outputs": [],
39
+ "source": [
40
+ "tab1 = pc[['name', 'pc_miou_best', 'pc_fgiou_best', 'pc_ap']]"
41
+ ]
42
+ },
43
+ {
44
+ "cell_type": "code",
45
+ "execution_count": null,
46
+ "metadata": {},
47
+ "outputs": [],
48
+ "source": [
49
+ "cols = ['pc_miou_0.3', 'pc_fgiou_0.3', 'pc_ap']\n",
50
+ "tab1 = pc[['name'] + cols]\n",
51
+ "for k in cols:\n",
52
+ " tab1.loc[:, k] = (100 * tab1.loc[:, k]).round(1)\n",
53
+ "tab1.loc[:, 'name'] = ['CLIPSeg (PC+)', 'CLIPSeg (PC, $D=128$)', 'CLIPSeg (PC)', 'CLIP-Deconv', 'ViTSeg (PC+)', 'ViTSeg (PC)']\n",
54
+ "tab1.insert(1, 't', [0.3]*tab1.shape[0])\n",
55
+ "print(tab1.to_latex(header=False, index=False))"
56
+ ]
57
+ },
58
+ {
59
+ "cell_type": "markdown",
60
+ "metadata": {},
61
+ "source": [
62
+ "For 0.1 threshold"
63
+ ]
64
+ },
65
+ {
66
+ "cell_type": "code",
67
+ "execution_count": null,
68
+ "metadata": {},
69
+ "outputs": [],
70
+ "source": [
71
+ "cols = ['pc_miou_0.1', 'pc_fgiou_0.1', 'pc_ap']\n",
72
+ "tab1 = pc[['name'] + cols]\n",
73
+ "for k in cols:\n",
74
+ " tab1.loc[:, k] = (100 * tab1.loc[:, k]).round(1)\n",
75
+ "tab1.loc[:, 'name'] = ['CLIPSeg (PC+)', 'CLIPSeg (PC, $D=128$)', 'CLIPSeg (PC)', 'CLIP-Deconv', 'ViTSeg (PC+)', 'ViTSeg (PC)']\n",
76
+ "tab1.insert(1, 't', [0.1]*tab1.shape[0])\n",
77
+ "print(tab1.to_latex(header=False, index=False))"
78
+ ]
79
+ },
80
+ {
81
+ "cell_type": "markdown",
82
+ "metadata": {},
83
+ "source": [
84
+ "# One-shot"
85
+ ]
86
+ },
87
+ {
88
+ "cell_type": "markdown",
89
+ "metadata": {},
90
+ "source": [
91
+ "### Pascal"
92
+ ]
93
+ },
94
+ {
95
+ "cell_type": "code",
96
+ "execution_count": null,
97
+ "metadata": {},
98
+ "outputs": [],
99
+ "source": [
100
+ "pas = experiment('experiments/pascal_1shot.yaml', nums=':19').dataframe()"
101
+ ]
102
+ },
103
+ {
104
+ "cell_type": "code",
105
+ "execution_count": null,
106
+ "metadata": {},
107
+ "outputs": [],
108
+ "source": [
109
+ "pas[['name', 'pas_h2_miou_0.3', 'pas_h2_biniou_0.3', 'pas_h2_ap', 'pas_h2_fgiou_ct']]"
110
+ ]
111
+ },
112
+ {
113
+ "cell_type": "code",
114
+ "execution_count": null,
115
+ "metadata": {},
116
+ "outputs": [],
117
+ "source": [
118
+ "pas = experiment('experiments/pascal_1shot.yaml', nums=':8').dataframe()\n",
119
+ "tab1 = pas[['pas_h2_miou_0.3', 'pas_h2_biniou_0.3', 'pas_h2_ap']]\n",
120
+ "print('CLIPSeg (PC+) & 0.3 & CLIP & ' + ' & '.join(f'{x*100:.1f}' for x in tab1[0:4].mean(0).values), '\\\\\\\\')\n",
121
+ "print('CLIPSeg (PC) & 0.3 & CLIP & ' + ' & '.join(f'{x*100:.1f}' for x in tab1[4:8].mean(0).values), '\\\\\\\\')\n",
122
+ "\n",
123
+ "pas = experiment('experiments/pascal_1shot.yaml', nums='12:16').dataframe()\n",
124
+ "tab1 = pas[['pas_h2_miou_0.2', 'pas_h2_biniou_0.2', 'pas_h2_ap']]\n",
125
+ "print('CLIP-Deconv (PC+) & 0.2 & CLIP & ' + ' & '.join(f'{x*100:.1f}' for x in tab1[0:4].mean(0).values), '\\\\\\\\')\n",
126
+ "\n",
127
+ "pas = experiment('experiments/pascal_1shot.yaml', nums='16:20').dataframe()\n",
128
+ "tab1 = pas[['pas_t_miou_0.2', 'pas_t_biniou_0.2', 'pas_t_ap']]\n",
129
+ "print('ViTSeg (PC+) & 0.2 & CLIP & ' + ' & '.join(f'{x*100:.1f}' for x in tab1[0:4].mean(0).values), '\\\\\\\\')"
130
+ ]
131
+ },
132
+ {
133
+ "cell_type": "markdown",
134
+ "metadata": {},
135
+ "source": [
136
+ "#### Pascal Zero-shot (in one-shot setting)\n",
137
+ "\n",
138
+ "Using the same setting as one-shot (hence different from the other zero-shot benchmark)"
139
+ ]
140
+ },
141
+ {
142
+ "cell_type": "code",
143
+ "execution_count": null,
144
+ "metadata": {},
145
+ "outputs": [],
146
+ "source": [
147
+ "pas = experiment('experiments/pascal_1shot.yaml', nums=':8').dataframe()\n",
148
+ "tab1 = pas[['pas_t_miou_0.3', 'pas_t_biniou_0.3', 'pas_t_ap']]\n",
149
+ "print('CLIPSeg (PC+) & 0.3 & CLIP & ' + ' & '.join(f'{x*100:.1f}' for x in tab1[0:4].mean(0).values), '\\\\\\\\')\n",
150
+ "print('CLIPSeg (PC) & 0.3 & CLIP & ' + ' & '.join(f'{x*100:.1f}' for x in tab1[4:8].mean(0).values), '\\\\\\\\')\n",
151
+ "\n",
152
+ "pas = experiment('experiments/pascal_1shot.yaml', nums='12:16').dataframe()\n",
153
+ "tab1 = pas[['pas_t_miou_0.3', 'pas_t_biniou_0.3', 'pas_t_ap']]\n",
154
+ "print('CLIP-Deconv (PC+) & 0.3 & CLIP & ' + ' & '.join(f'{x*100:.1f}' for x in tab1[0:4].mean(0).values), '\\\\\\\\')\n",
155
+ "\n",
156
+ "pas = experiment('experiments/pascal_1shot.yaml', nums='16:20').dataframe()\n",
157
+ "tab1 = pas[['pas_t_miou_0.2', 'pas_t_biniou_0.2', 'pas_t_ap']]\n",
158
+ "print('ViTSeg (PC+) & 0.2 & CLIP & ' + ' & '.join(f'{x*100:.1f}' for x in tab1[0:4].mean(0).values), '\\\\\\\\')"
159
+ ]
160
+ },
161
+ {
162
+ "cell_type": "code",
163
+ "execution_count": null,
164
+ "metadata": {},
165
+ "outputs": [],
166
+ "source": [
167
+ "# without fixed thresholds...\n",
168
+ "\n",
169
+ "pas = experiment('experiments/pascal_1shot.yaml', nums=':8').dataframe()\n",
170
+ "tab1 = pas[['pas_t_best_miou', 'pas_t_best_biniou', 'pas_t_ap']]\n",
171
+ "print('CLIPSeg (PC+) & CLIP & ' + ' & '.join(f'{x*100:.1f}' for x in tab1[0:4].mean(0).values), '\\\\\\\\')\n",
172
+ "print('CLIPSeg (PC) & CLIP & ' + ' & '.join(f'{x*100:.1f}' for x in tab1[4:8].mean(0).values), '\\\\\\\\')\n",
173
+ "\n",
174
+ "pas = experiment('experiments/pascal_1shot.yaml', nums='12:16').dataframe()\n",
175
+ "tab1 = pas[['pas_t_best_miou', 'pas_t_best_biniou', 'pas_t_ap']]\n",
176
+ "print('CLIP-Deconv (PC+) & CLIP & ' + ' & '.join(f'{x*100:.1f}' for x in tab1[0:4].mean(0).values), '\\\\\\\\')"
177
+ ]
178
+ },
179
+ {
180
+ "cell_type": "markdown",
181
+ "metadata": {},
182
+ "source": [
183
+ "### COCO"
184
+ ]
185
+ },
186
+ {
187
+ "cell_type": "code",
188
+ "execution_count": null,
189
+ "metadata": {},
190
+ "outputs": [],
191
+ "source": [
192
+ "coco = experiment('experiments/coco.yaml', nums=':29').dataframe()"
193
+ ]
194
+ },
195
+ {
196
+ "cell_type": "code",
197
+ "execution_count": null,
198
+ "metadata": {},
199
+ "outputs": [],
200
+ "source": [
201
+ "tab1 = coco[['coco_h2_miou_0.1', 'coco_h2_biniou_0.1', 'coco_h2_ap']]\n",
202
+ "tab2 = coco[['coco_h2_miou_0.2', 'coco_h2_biniou_0.2', 'coco_h2_ap']]\n",
203
+ "tab3 = coco[['coco_h2_miou_best', 'coco_h2_biniou_best', 'coco_h2_ap']]\n",
204
+ "print('CLIPSeg (COCO) & 0.1 & CLIP & ' + ' & '.join(f'{x*100:.1f}' for x in tab1[:4].mean(0).values), '\\\\\\\\')\n",
205
+ "print('CLIPSeg (COCO+N) & 0.1 & CLIP & ' + ' & '.join(f'{x*100:.1f}' for x in tab1[4:8].mean(0).values), '\\\\\\\\')\n",
206
+ "print('CLIP-Deconv (COCO+N) & 0.1 & CLIP & ' + ' & '.join(f'{x*100:.1f}' for x in tab1[12:16].mean(0).values), '\\\\\\\\')\n",
207
+ "print('ViTSeg (COCO) & 0.1 & CLIP & ' + ' & '.join(f'{x*100:.1f}' for x in tab1[8:12].mean(0).values), '\\\\\\\\')"
208
+ ]
209
+ },
210
+ {
211
+ "cell_type": "markdown",
212
+ "metadata": {},
213
+ "source": [
214
+ "# Zero-shot"
215
+ ]
216
+ },
217
+ {
218
+ "cell_type": "code",
219
+ "execution_count": null,
220
+ "metadata": {},
221
+ "outputs": [],
222
+ "source": [
223
+ "zs = experiment('experiments/pascal_0shot.yaml', nums=':11').dataframe()"
224
+ ]
225
+ },
226
+ {
227
+ "cell_type": "code",
228
+ "execution_count": null,
229
+ "metadata": {},
230
+ "outputs": [],
231
+ "source": [
232
+ "\n",
233
+ "tab1 = zs[['pas_zs_seen', 'pas_zs_unseen']]\n",
234
+ "print('CLIPSeg (PC+) & CLIP & ' + ' & '.join(f'{x*100:.1f}' for x in tab1[8:9].values[0].tolist() + tab1[10:11].values[0].tolist()), '\\\\\\\\')\n",
235
+ "print('CLIP-Deconv & CLIP & ' + ' & '.join(f'{x*100:.1f}' for x in tab1[2:3].values[0].tolist() + tab1[3:4].values[0].tolist()), '\\\\\\\\')\n",
236
+ "print('ViTSeg & ImageNet-1K & ' + ' & '.join(f'{x*100:.1f}' for x in tab1[4:5].values[0].tolist() + tab1[5:6].values[0].tolist()), '\\\\\\\\')"
237
+ ]
238
+ },
239
+ {
240
+ "cell_type": "markdown",
241
+ "metadata": {},
242
+ "source": [
243
+ "# Ablation"
244
+ ]
245
+ },
246
+ {
247
+ "cell_type": "code",
248
+ "execution_count": null,
249
+ "metadata": {},
250
+ "outputs": [],
251
+ "source": [
252
+ "ablation = experiment('experiments/ablation.yaml', nums=':8').dataframe()"
253
+ ]
254
+ },
255
+ {
256
+ "cell_type": "code",
257
+ "execution_count": null,
258
+ "metadata": {},
259
+ "outputs": [],
260
+ "source": [
261
+ "tab1 = ablation[['name', 'pc_miou_best', 'pc_ap', 'pc-vis_miou_best', 'pc-vis_ap']]\n",
262
+ "for k in ['pc_miou_best', 'pc_ap', 'pc-vis_miou_best', 'pc-vis_ap']:\n",
263
+ " tab1.loc[:, k] = (100 * tab1.loc[:, k]).round(1)\n",
264
+ "tab1.loc[:, 'name'] = ['CLIPSeg', 'no CLIP pre-training', 'no-negatives', '50% negatives', 'no visual', '$D=16$', 'only layer 3', 'highlight mask']"
265
+ ]
266
+ },
267
+ {
268
+ "cell_type": "code",
269
+ "execution_count": null,
270
+ "metadata": {},
271
+ "outputs": [],
272
+ "source": [
273
+ "print(tab1.loc[[0,1,4,5,6,7],:].to_latex(header=False, index=False))"
274
+ ]
275
+ },
276
+ {
277
+ "cell_type": "code",
278
+ "execution_count": null,
279
+ "metadata": {},
280
+ "outputs": [],
281
+ "source": [
282
+ "print(tab1.loc[[0,1,4,5,6,7],:].to_latex(header=False, index=False))"
283
+ ]
284
+ },
285
+ {
286
+ "cell_type": "markdown",
287
+ "metadata": {},
288
+ "source": [
289
+ "# Generalization"
290
+ ]
291
+ },
292
+ {
293
+ "cell_type": "code",
294
+ "execution_count": null,
295
+ "metadata": {},
296
+ "outputs": [],
297
+ "source": [
298
+ "generalization = experiment('experiments/generalize.yaml').dataframe()"
299
+ ]
300
+ },
301
+ {
302
+ "cell_type": "code",
303
+ "execution_count": null,
304
+ "metadata": {},
305
+ "outputs": [],
306
+ "source": [
307
+ "gen = generalization[['aff_best_fgiou', 'aff_ap', 'ability_best_fgiou', 'ability_ap', 'part_best_fgiou', 'part_ap']].values"
308
+ ]
309
+ },
310
+ {
311
+ "cell_type": "code",
312
+ "execution_count": null,
313
+ "metadata": {},
314
+ "outputs": [],
315
+ "source": [
316
+ "print(\n",
317
+ " 'CLIPSeg (PC+) & ' + ' & '.join(f'{x*100:.1f}' for x in gen[1]) + ' \\\\\\\\ \\n' + \\\n",
318
+ " 'CLIPSeg (LVIS) & ' + ' & '.join(f'{x*100:.1f}' for x in gen[0]) + ' \\\\\\\\ \\n' + \\\n",
319
+ " 'CLIP-Deconv & ' + ' & '.join(f'{x*100:.1f}' for x in gen[2]) + ' \\\\\\\\ \\n' + \\\n",
320
+ " 'VITSeg & ' + ' & '.join(f'{x*100:.1f}' for x in gen[3]) + ' \\\\\\\\'\n",
321
+ ")"
322
+ ]
323
+ }
324
+ ],
325
+ "metadata": {
326
+ "interpreter": {
327
+ "hash": "800ed241f7db2bd3aa6942aa3be6809cdb30ee6b0a9e773dfecfa9fef1f4c586"
328
+ },
329
+ "kernelspec": {
330
+ "display_name": "env2",
331
+ "language": "python",
332
+ "name": "env2"
333
+ },
334
+ "language_info": {
335
+ "codemirror_mode": {
336
+ "name": "ipython",
337
+ "version": 3
338
+ },
339
+ "file_extension": ".py",
340
+ "mimetype": "text/x-python",
341
+ "name": "python",
342
+ "nbconvert_exporter": "python",
343
+ "pygments_lexer": "ipython3",
344
+ "version": "3.8.8"
345
+ }
346
+ },
347
+ "nbformat": 4,
348
+ "nbformat_minor": 4
349
+ }