Kizi-Art committed on
Commit 5a21817
1 Parent(s): 2b5e484

Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes.

Files changed (50)
  1. .gitattributes +1 -0
  2. extensions-builtin/sd-webui-deforum/.github/FUNDING.yml +13 -0
  3. extensions-builtin/sd-webui-deforum/.github/ISSUE_TEMPLATE/bug_report.yml +105 -0
  4. extensions-builtin/sd-webui-deforum/.github/ISSUE_TEMPLATE/config.yml +8 -0
  5. extensions-builtin/sd-webui-deforum/.github/ISSUE_TEMPLATE/feature_request.yml +46 -0
  6. extensions-builtin/sd-webui-deforum/.github/scripts/issue_checker.py +126 -0
  7. extensions-builtin/sd-webui-deforum/.github/workflows/issue_checker.yaml +23 -0
  8. extensions-builtin/sd-webui-deforum/.github/workflows/run_tests.yaml +108 -0
  9. extensions-builtin/sd-webui-deforum/.gitignore +34 -0
  10. extensions-builtin/sd-webui-deforum/CONTRIBUTING.md +5 -0
  11. extensions-builtin/sd-webui-deforum/LICENSE +0 -0
  12. extensions-builtin/sd-webui-deforum/README.md +73 -0
  13. extensions-builtin/sd-webui-deforum/install.py +26 -0
  14. extensions-builtin/sd-webui-deforum/javascript/deforum-hints.js +232 -0
  15. extensions-builtin/sd-webui-deforum/javascript/deforum.js +33 -0
  16. extensions-builtin/sd-webui-deforum/preload.py +42 -0
  17. extensions-builtin/sd-webui-deforum/pytest.ini +3 -0
  18. extensions-builtin/sd-webui-deforum/requirements-dev.txt +6 -0
  19. extensions-builtin/sd-webui-deforum/requirements.txt +8 -0
  20. extensions-builtin/sd-webui-deforum/scripts/default_settings.txt +259 -0
  21. extensions-builtin/sd-webui-deforum/scripts/deforum.py +42 -0
  22. extensions-builtin/sd-webui-deforum/scripts/deforum_api.py +485 -0
  23. extensions-builtin/sd-webui-deforum/scripts/deforum_api_models.py +60 -0
  24. extensions-builtin/sd-webui-deforum/scripts/deforum_extend_paths.py +33 -0
  25. extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/114763196.jpg +3 -0
  26. extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/RAFT.py +44 -0
  27. extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/animation.py +429 -0
  28. extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/animation_key_frames.py +166 -0
  29. extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/args.py +1179 -0
  30. extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/auto_navigation.py +88 -0
  31. extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/colors.py +36 -0
  32. extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/composable_masks.py +212 -0
  33. extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/consistency_check.py +148 -0
  34. extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/defaults.py +219 -0
  35. extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/deforum_controlnet.py +368 -0
  36. extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/deforum_controlnet_gradio.py +50 -0
  37. extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/deforum_tqdm.py +98 -0
  38. extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/deprecation_utils.py +98 -0
  39. extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/depth.py +159 -0
  40. extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/depth_adabins.py +78 -0
  41. extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/depth_leres.py +71 -0
  42. extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/depth_midas.py +91 -0
  43. extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/depth_zoe.py +46 -0
  44. extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/frame_interpolation.py +240 -0
  45. extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/general_utils.py +144 -0
  46. extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/generate.py +368 -0
  47. extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/gradio_funcs.py +296 -0
  48. extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/human_masking.py +86 -0
  49. extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/hybrid_video.py +613 -0
  50. extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/image_sharpening.py +38 -0
.gitattributes CHANGED
@@ -53,3 +53,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.jpg filter=lfs diff=lfs merge=lfs -text
  *.jpeg filter=lfs diff=lfs merge=lfs -text
  *.webp filter=lfs diff=lfs merge=lfs -text
+ extensions-builtin/sd-webui-deforum/tests/testdata/example_init_vid.mp4 filter=lfs diff=lfs merge=lfs -text
extensions-builtin/sd-webui-deforum/.github/FUNDING.yml ADDED
@@ -0,0 +1,13 @@
+ # These are supported funding model platforms
+
+ github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
+ patreon: deforum
+ open_collective: # Replace with a single Open Collective username
+ ko_fi: # Replace with a single Ko-fi username
+ tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
+ community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
+ liberapay: # Replace with a single Liberapay username
+ issuehunt: # Replace with a single IssueHunt username
+ otechie: # Replace with a single Otechie username
+ lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
+ custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
extensions-builtin/sd-webui-deforum/.github/ISSUE_TEMPLATE/bug_report.yml ADDED
@@ -0,0 +1,105 @@
+ name: Bug Report
+ description: Create a bug report for the Deforum extension
+ title: "[Bug]: "
+ labels: ["bug"]
+
+ body:
+   - type: checkboxes
+     attributes:
+       label: Have you read the latest version of the FAQ?
+       description: Please visit the page called FAQ & Troubleshooting on the Deforum wiki in this repository and see if your problem has already been described there.
+       options:
+         - label: I have visited the FAQ page right now and my issue is not present there
+           required: true
+   - type: checkboxes
+     attributes:
+       label: Is there an existing issue for this?
+       description: Please search to see if an issue already exists for the bug you encountered (including the closed issues).
+       options:
+         - label: I have searched the existing issues and checked the recent builds/commits of both this extension and the webui
+           required: true
+   - type: checkboxes
+     attributes:
+       label: Are you using the latest version of the Deforum extension?
+       description: Please check whether your Deforum is based on the latest repo commit (git log), or update it through the 'Extensions' tab and check if the issue still persists. Otherwise, check this box.
+       options:
+         - label: I have updated Deforum to the latest version and I still have the issue.
+           required: true
+   - type: markdown
+     attributes:
+       value: |
+         *Please fill this form with as much information as possible, don't forget to fill "What OS..." and provide screenshots if possible*
+   - type: markdown
+     attributes:
+       value: |
+         **Forewarning:** *if you don't provide the full crash log, your issue will be discarded*
+   - type: textarea
+     id: what-did
+     attributes:
+       label: What happened?
+       description: Tell us what happened in a very clear and simple way
+     validations:
+       required: true
+   - type: textarea
+     id: steps
+     attributes:
+       label: Steps to reproduce the problem
+       description: Please provide us with precise step-by-step information on how to reproduce the bug
+       value: |
+         1. Go to ....
+         2. Press ....
+         3. ...
+     validations:
+       required: true
+   - type: textarea
+     id: what-should
+     attributes:
+       label: What should have happened/how would you fix it?
+       description: Tell us what you think the normal behavior should be, or share any ideas on how to solve it
+   - type: textarea
+     id: what-torch
+     attributes:
+       label: Torch version
+       description: Which Torch version your WebUI is working with. You can find it by looking at the bottom of the page.
+     validations:
+       required: true
+   - type: dropdown
+     id: where
+     attributes:
+       label: On which platform are you launching the webui with the extension?
+       multiple: true
+       options:
+         - Local PC setup (Windows)
+         - Local PC setup (Linux)
+         - Local PC setup (Mac)
+         - Google Colab (The Last Ben's)
+         - Google Colab (Other)
+         - Cloud server (Linux)
+         - Other (please specify in "additional information")
+   - type: textarea
+     id: deforumsettings
+     attributes:
+       label: Deforum settings
+       description: Send here a link to your used settings file or the latest generated one in the 'outputs/img2img-images/Deforum/' folder (ideally, upload it to GitHub gists).
+     validations:
+       required: true
+   - type: textarea
+     id: customsettings
+     attributes:
+       label: Webui core settings
+       description: Send here a link to your ui-config.json file in the core 'stable-diffusion-webui' folder. Note that if you have 'With img2img, do exactly the amount of steps the slider specified' checked, your issue will be discarded.
+     validations:
+       required: true
+   - type: textarea
+     id: logs
+     attributes:
+       label: Console logs
+       description: Now, this is the most important part, which most users fail at the first time! Please provide the **full** cmd/terminal logs from the moment you started the webui (i.e. clicked the launch file or started it from cmd) to the part when your bug happened.
+       render: Shell
+     validations:
+       required: true
+   - type: textarea
+     id: misc
+     attributes:
+       label: Additional information
+       description: Any relevant additional info or context.
extensions-builtin/sd-webui-deforum/.github/ISSUE_TEMPLATE/config.yml ADDED
@@ -0,0 +1,8 @@
+ blank_issues_enabled: false
+ contact_links:
+   - name: Deforum Github discussions
+     url: https://github.com/deforum-art/deforum-for-automatic1111-webui/discussions
+     about: Please ask and answer questions here. If you want to complain about something, don't try to circumvent issue filing by starting a discussion here 🙃
+   - name: Deforum Discord
+     url: https://discord.gg/deforum
+     about: Here is our main community where we chat, discuss development and share experiments and results
extensions-builtin/sd-webui-deforum/.github/ISSUE_TEMPLATE/feature_request.yml ADDED
@@ -0,0 +1,46 @@
+ name: Feature request
+ description: Suggest an idea for the Deforum extension
+ title: "[Feature Request]: "
+ labels: ["enhancement"]
+
+ body:
+   - type: checkboxes
+     attributes:
+       label: Is there an existing issue for this?
+       description: Please search to see if an issue already exists for the feature you want, and that it's not implemented in a recent build/commit.
+       options:
+         - label: I have searched the existing issues and checked the recent builds/commits
+           required: true
+   - type: markdown
+     attributes:
+       value: |
+         *Please fill this form with as much information as possible, provide screenshots and/or illustrations of the feature if possible*
+   - type: textarea
+     id: feature
+     attributes:
+       label: What would your feature do?
+       description: Tell us about your feature in a very clear and simple way, and what problem it would solve
+     validations:
+       required: true
+   - type: textarea
+     id: workflow
+     attributes:
+       label: Proposed workflow
+       description: Please provide us with step-by-step information on how you'd like the feature to be accessed and used
+       value: |
+         1. Go to ....
+         2. Press ....
+         3. ...
+     validations:
+       required: true
+   - type: textarea
+     id: misc
+     attributes:
+       label: Additional information
+       description: Add any other context or screenshots about the feature request here.
+   - type: textarea
+     attributes:
+       label: Are you going to help adding it?
+       description: Do you want to participate in Deforum development and bring the desired feature sooner? Let us know if you are willing to add the desired feature; ideally, leave your Discord handle here so we can contact you for a less formal conversation. Our community is welcoming and ready to provide you with any information on the project structure or how the code works. Keep in mind, however, that if you do not want to implement your new feature yourself, you will have to wait until the team picks up your issue.
+     validations:
+       required: true
extensions-builtin/sd-webui-deforum/.github/scripts/issue_checker.py ADDED
@@ -0,0 +1,126 @@
+ # Copyright (C) 2023 Deforum LLC
+ #
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU Affero General Public License as published by
+ # the Free Software Foundation, version 3 of the License.
+ #
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ # GNU General Public License for more details.
+ #
+ # You should have received a copy of the GNU Affero General Public License
+ # along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+ # Contact the authors: https://deforum.github.io/
+
+ import os
+ import re
+ from github import Github
+
+ # Get GitHub token from environment variables
+ token = os.environ['GITHUB_TOKEN']
+ g = Github(token)
+
+ # Get the current repository
+ print(f"Repo is {os.environ['GITHUB_REPOSITORY']}")
+ repo = g.get_repo(os.environ['GITHUB_REPOSITORY'])
+
+ # Get the issue number from the event payload
+ #issue_number = int(os.environ['ISSUE_NUMBER'])
+
+ for issue in repo.get_issues():
+     print(f"Processing issue №{issue.number}")
+     if issue.pull_request:
+         continue
+
+     # Get the issue object
+     #issue = repo.get_issue(issue_number)
+
+     # Define the keywords to search for in the issue
+     keywords = ['Python', 'Commit hash', 'Launching Web UI with arguments', 'Model loaded', 'deforum']
+
+     # Check that ALL of the keywords are present in the issue
+     def check_keywords(issue_body, keywords):
+         for keyword in keywords:
+             if not re.search(r'\b' + re.escape(keyword) + r'\b', issue_body, re.IGNORECASE):
+                 return False
+         return True
+
+     # Check if the issue title has at least a specified number of words
+     def check_title_word_count(issue_title, min_word_count):
+         words = issue_title.replace("/", " ").replace("\\", " ").split()
+         return len(words) >= min_word_count
+
+     # Check if the issue title is concise
+     def check_title_concise(issue_title, max_word_count):
+         words = issue_title.replace("/", " ").replace("\\", " ").split()
+         return len(words) <= max_word_count
+
+     # Check if the commit ID is in the correct hash form
+     def check_commit_id_format(issue_body):
+         match = re.search(r'webui commit id - ([a-fA-F0-9]+|\[[a-fA-F0-9]+\])', issue_body)
+         if not match:
+             print('webui_commit_id not found')
+             return False
+         webui_commit_id = match.group(1)
+         print(f'webui_commit_id {webui_commit_id}')
+         webui_commit_id = webui_commit_id.replace("[", "").replace("]", "")
+         if not (7 <= len(webui_commit_id) <= 40):
+             print('invalid length!')
+             return False
+         match = re.search(r'deforum exten commit id - ([a-fA-F0-9]+|\[[a-fA-F0-9]+\])', issue_body)
+         if not match:  # fixed: the original tested `if match:`, which inverted the check
+             print('deforum commit id not found')
+             return False
+         t2v_commit_id = match.group(1)
+         print(f'deforum_commit_id {t2v_commit_id}')
+         t2v_commit_id = t2v_commit_id.replace("[", "").replace("]", "")
+         if not (7 <= len(t2v_commit_id) <= 40):
+             print('invalid length!')
+             return False
+         return True
+
+     # Only if a bug report
+     if '[Bug]' in issue.title and not '[Feature Request]' in issue.title:
+         print('The issue is eligible')
+         # Initialize an empty list to store error messages
+         error_messages = []
+
+         # Check each condition and add the corresponding error message if the condition is not met
+         if not check_keywords(issue.body, keywords):
+             error_messages.append("Include **THE FULL LOG FROM THE START OF THE WEBUI** in the issue description.")
+
+         if not check_title_word_count(issue.title, 3):
+             error_messages.append("Make sure the issue title has at least 3 words.")
+
+         if not check_title_concise(issue.title, 13):
+             error_messages.append("The issue title should be concise and contain no more than 13 words.")
+
+         # if not check_commit_id_format(issue.body):
+         #     error_messages.append("Provide a valid commit ID in the format 'commit id - [commit_hash]' **both** for the WebUI and the Extension.")
+
+         # If there are any error messages, close the issue and send a comment with the error messages
+         if error_messages:
+             print('Invalid issue, closing')
+             # Add the "wrong format" label to the issue
+             not_planned_label = repo.get_label("wrong format")
+             issue.add_to_labels(not_planned_label)
+
+             # Close the issue
+             issue.edit(state='closed')
+
+             # Generate the comment by concatenating the error messages
+             comment = "This issue has been closed due to incorrect formatting. Please address the following mistakes and reopen the issue (click on the 'Reopen' button below):\n\n"
+             comment += "\n".join(f"- {error_message}" for error_message in error_messages)
+
+             # Add the comment to the issue
+             issue.create_comment(comment)
+     elif repo.get_label("wrong format") in issue.labels:
+         print('Issue is fine')
+         issue.edit(state='open')
+         issue.delete_labels()
+         bug_label = repo.get_label("bug")
+         issue.add_to_labels(bug_label)
+         comment = "Thanks for addressing your formatting mistakes. The issue has been reopened now."
+         issue.create_comment(comment)
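For reference, the keyword and title checks above can be exercised without GitHub credentials; a minimal sketch (the sample issue body and title below are made up for illustration):

```python
import re

keywords = ['Python', 'Commit hash', 'Launching Web UI with arguments', 'Model loaded', 'deforum']

def check_keywords(issue_body, keywords):
    # Same logic as the script: every keyword must appear, case-insensitively.
    return all(re.search(r'\b' + re.escape(k) + r'\b', issue_body, re.IGNORECASE) for k in keywords)

def title_word_count(title):
    # Slashes and backslashes count as word separators, as in the script.
    return len(title.replace("/", " ").replace("\\", " ").split())

body = "python 3.10, Commit hash abc1234, Launching Web UI with arguments: --api, Model loaded, deforum started"
title = "[Bug]: 3D mode crashes on start"
print(check_keywords(body, keywords))        # True: all five log markers present
print(3 <= title_word_count(title) <= 13)    # True: title length is acceptable
```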
extensions-builtin/sd-webui-deforum/.github/workflows/issue_checker.yaml ADDED
@@ -0,0 +1,23 @@
+ name: Issue Checker
+
+ on:
+   issues:
+     types: [opened, reopened, edited]
+
+ jobs:
+   check_issue:
+     runs-on: ubuntu-latest
+     steps:
+       - name: Checkout repository
+         uses: actions/checkout@v3
+       - name: Set up Python
+         uses: actions/setup-python@v3
+         with:
+           python-version: '3.x'
+       - name: Install dependencies
+         run: pip install PyGithub
+       - name: Check issue
+         env:
+           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+           ISSUE_NUMBER: ${{ github.event.number }}
+         run: python .github/scripts/issue_checker.py
extensions-builtin/sd-webui-deforum/.github/workflows/run_tests.yaml ADDED
@@ -0,0 +1,108 @@
+ name: Tests
+
+ on:
+   - push
+   - pull_request
+
+ jobs:
+   test:
+     name: tests on CPU with empty model
+     runs-on: ubuntu-latest
+     if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name
+     steps:
+       - name: Checkout a1111
+         uses: actions/checkout@v3
+         with:
+           repository: AUTOMATIC1111/stable-diffusion-webui
+           ref: v1.6.0
+       - name: Checkout Controlnet extension
+         uses: actions/checkout@v3
+         with:
+           repository: Mikubill/sd-webui-controlnet
+           path: extensions/sd-webui-controlnet
+       - name: Checkout Deforum
+         uses: actions/checkout@v3
+         with:
+           path: extensions/deforum
+       - name: Set up Python 3.10
+         uses: actions/setup-python@v4
+         with:
+           python-version: 3.10.6
+           cache: pip
+           cache-dependency-path: |
+             **/requirements*txt
+             launch.py
+       - name: Install test dependencies
+         run: pip install wait-for-it -r extensions/deforum/requirements-dev.txt
+         env:
+           PIP_DISABLE_PIP_VERSION_CHECK: "1"
+           PIP_PROGRESS_BAR: "off"
+       - name: Setup environment
+         run: python launch.py --skip-torch-cuda-test --exit
+         env:
+           PIP_DISABLE_PIP_VERSION_CHECK: "1"
+           PIP_PROGRESS_BAR: "off"
+           TORCH_INDEX_URL: https://download.pytorch.org/whl/cpu
+           WEBUI_LAUNCH_LIVE_OUTPUT: "1"
+           PYTHONUNBUFFERED: "1"
+       - name: Start test server
+         run: >
+           python -m coverage run
+           --data-file=.coverage.server
+           launch.py
+           --skip-prepare-environment
+           --skip-torch-cuda-test
+           --test-server
+           --do-not-download-clip
+           --no-half
+           --disable-opt-split-attention
+           --use-cpu all
+           --api-server-stop
+           --deforum-api
+           --api
+           2>&1 | tee serverlog.txt &
+       - name: Run tests (with continue-on-error due to mysterious non-zero return code on success)
+         continue-on-error: true
+         id: runtests
+         run: |
+           wait-for-it --service 127.0.0.1:7860 -t 600
+           cd extensions/deforum
+           python -m coverage run --data-file=.coverage.client -m pytest -vv --junitxml=tests/results.xml tests
+       - name: Check for test failures (necessary because of continue-on-error above)
+         id: testresults
+         uses: mavrosxristoforos/get-xml-info@1.1.0
+         with:
+           xml-file: 'extensions/deforum/tests/results.xml'
+           xpath: '//testsuite/@failures'
+       - name: Fail if there were test failures
+         run: |
+           echo "Test failures: ${{ steps.testresults.outputs.info }}"
+           [ ${{ steps.testresults.outputs.info }} -eq 0 ]
+       - name: Kill test server
+         if: always()
+         run: curl -vv -XPOST http://127.0.0.1:7860/sdapi/v1/server-stop && sleep 10
+       - name: Show coverage
+         run: |
+           python -m coverage combine .coverage* extensions/deforum/.coverage*
+           python -m coverage report -i
+           python -m coverage html -i
+       - name: Upload main app output
+         uses: actions/upload-artifact@v3
+         if: always()
+         with:
+           name: serverlog
+           path: serverlog.txt
+       - name: Upload coverage HTML
+         uses: actions/upload-artifact@v3
+         if: always()
+         with:
+           name: htmlcov
+           path: htmlcov
+       - name: Surface failing tests
+         if: always()
+         uses: pmeier/pytest-results-action@main
+         with:
+           path: extensions/deforum/tests/results.xml
+           summary: true
+           display-options: fEX
+           fail-on-empty: true
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (C) 2023 Deforum LLC
2
+ #
3
+ # This program is free software: you can redistribute it and/or modify
4
+ # it under the terms of the GNU Affero General Public License as published by
5
+ # the Free Software Foundation, version 3 of the License.
6
+ #
7
+ # This program is distributed in the hope that it will be useful,
8
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
9
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10
+ # GNU General Public License for more details.
11
+ #
12
+ # You should have received a copy of the GNU Affero General Public License
13
+ # along with this program. If not, see <https://www.gnu.org/licenses/>.
14
+
15
+ # Contact the authors: https://deforum.github.io/
16
+
17
+ # Unnecessary compiled python files.
18
+ __pycache__
19
+ *.pyc
20
+ *.pyo
21
+
22
+ # Output Images
23
+ outputs
24
+
25
+ # Log files for colab-convert
26
+ cc-outputs.log
27
+ *.safetensors
28
+ scripts/deforum_helpers/navigation.py
29
+
30
+ #test output
31
+ htmlcov
32
+ tests/results.xml
33
+ .coverage*
34
+ serverlog.txt
extensions-builtin/sd-webui-deforum/CONTRIBUTING.md ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ # Contributing
2
+
3
+ When contributing please ping the devs via Discord https://discord.gg/deforum to make sure you addition will fit well such a large project and to get help if needed.
4
+
5
+ *By contributing to this project you agree that your work will be granted copyright to Deforum LLC and licensed under the terms of the GNU Affero General Public License version 3.*
extensions-builtin/sd-webui-deforum/LICENSE ADDED
The diff for this file is too large to render.
extensions-builtin/sd-webui-deforum/README.md ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ # Deforum Stable Diffusion — official extension for AUTOMATIC1111's webui
3
+
4
+ <p align="left">
5
+ <a href="https://github.com/deforum-art/sd-webui-deforum/commits"><img alt="Last Commit" src="https://img.shields.io/github/last-commit/deforum-art/deforum-for-automatic1111-webui"></a>
6
+ <a href="https://github.com/deforum-art/sd-webui-deforum/issues"><img alt="GitHub issues" src="https://img.shields.io/github/issues/deforum-art/deforum-for-automatic1111-webui"></a>
7
+ <a href="https://github.com/deforum-art/sd-webui-deforum/stargazers"><img alt="GitHub stars" src="https://img.shields.io/github/stars/deforum-art/deforum-for-automatic1111-webui"></a>
8
+ <a href="https://github.com/deforum-art/sd-webui-deforum/network"><img alt="GitHub forks" src="https://img.shields.io/github/forks/deforum-art/deforum-for-automatic1111-webui"></a>
9
+ </a>
10
+ </p>
11
+
12
+ ## Need help? See our [FAQ](https://github.com/deforum-art/sd-webui-deforum/wiki/FAQ-&-Troubleshooting)
13
+
14
+ ## Getting Started
15
+
16
+ 1. Install [AUTOMATIC1111's webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui/).
17
+
18
+ 2. Now two ways: either clone the repo into the `extensions` directory via git commandline launched within in the `stable-diffusion-webui` folder
19
+
20
+ ```sh
21
+ git clone https://github.com/deforum-art/sd-webui-deforum extensions/deforum
22
+ ```
23
+
24
+ Or download this repository, locate the `extensions` folder within your WebUI installation, create a folder named `deforum` and put the contents of the downloaded directory inside of it. Then restart WebUI.
25
+
26
+ 3. Open the webui, find the Deforum tab at the top of the page.
27
+
28
+ 4. Enter the animation settings. Refer to [this general guide](https://docs.google.com/document/d/1pEobUknMFMkn8F5TMsv8qRzamXX_75BShMMXV8IFslI/edit) and [this guide to math keyframing functions in Deforum](https://docs.google.com/document/d/1pfW1PwbDIuW0cv-dnuyYj1UzPqe23BlSLTJsqazffXM/edit?usp=sharing). However, **in this version prompt weights less than zero don't just like in original Deforum!** Split the positive and the negative prompt in the json section using --neg argument like this "apple:\`where(cos(t)>=0, cos(t), 0)\`, snow --neg strawberry:\`where(cos(t)<0, -cos(t), 0)\`"
29
+
30
+ 5. To view animation frames as they're being made, without waiting for the completion of an animation, go to the 'Settings' tab and set the value of this toolbar **above zero**. Warning: it may slow down the generation process.
31
+
32
+ ![adsdasunknown](https://user-images.githubusercontent.com/14872007/196064311-1b79866a-e55b-438a-84a7-004ff30829ad.png)
33
+
34
+
35
+ 6. Run the script and see if you got it working or even got something. **In 3D mode a large delay is expected at first** as the script loads the depth models. In the end, using the default settings the whole thing should consume 6.4 GBs of VRAM at 3D mode peaks and no more than 3.8 GB VRAM in 3D mode if you launch the webui with the '--lowvram' command line argument.
36
+
37
+ 7. After the generation process is completed, click the button with the self-describing name to show the video or gif result right in the GUI!
38
+
39
+ 8. Join our Discord where you can post generated stuff, ask questions and more: https://discord.gg/deforum. <br>
40
+ * There's also the 'Issues' tab in the repo, for well... reporting issues ;)
41
+
42
+ 9. Profit!
43
+
44
+ ## Known issues
45
+
46
+ * This port is not fully backward-compatible with the notebook and the local version both due to the changes in how AUTOMATIC1111's webui handles Stable Diffusion models and the changes in this script to get it to work in the new environment. *Expect* that you may not get exactly the same result or that the thing may break down because of the older settings.
47
+
48
+ ## Screenshots
49
+
50
+ Amazing raw Deforum animation by [Pxl.Pshr](https://www.instagram.com/pxl.pshr):
51
+ * Turn Audio ON!
52
+
53
+ (Audio credits: SKRILLEX, FRED AGAIN & FLOWDAN - RUMBLE (PHACE'S DNB FLIP))
54
+
55
+ https://user-images.githubusercontent.com/121192995/224450647-39529b28-be04-4871-bb7a-faf7afda2ef2.mp4
56
+
57
+ Setting file of that video: [here](https://github.com/deforum-art/sd-webui-deforum/files/11353167/PxlPshrWinningAnimationSettings.txt).
58
+
59
+ <br>
60
+
61
+ Main extension tab:
62
+
63
+ ![image](https://user-images.githubusercontent.com/121192995/226101131-43bf594a-3152-45dd-a5d1-2538d0bc221d.png)
64
+
65
+ Keyframes tab:
66
+
67
+ ![image](https://user-images.githubusercontent.com/121192995/226101140-bfe6cce7-9b78-4a1d-be9a-43e1fc78239e.png)
68
+
69
+ ## License
70
+
71
+ This program is distributed under the terms of the GNU Affero Public License v3.0, copyright (c) 2023 Deforum LLC.
72
+
73
+ Some of its sublicensed integrated 3rd party components may have other licenses, see LICENSE for usage terms.
extensions-builtin/sd-webui-deforum/install.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (C) 2023 Deforum LLC
2
+ #
3
+ # This program is free software: you can redistribute it and/or modify
4
+ # it under the terms of the GNU Affero General Public License as published by
5
+ # the Free Software Foundation, version 3 of the License.
6
+ #
7
+ # This program is distributed in the hope that it will be useful,
8
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
9
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10
+ # GNU General Public License for more details.
11
+ #
12
+ # You should have received a copy of the GNU Affero General Public License
13
+ # along with this program. If not, see <https://www.gnu.org/licenses/>.
14
+
15
+ # Contact the authors: https://deforum.github.io/
16
+
17
+ import launch
18
+ import os
19
+
20
+ req_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), "requirements.txt")
21
+
22
+ with open(req_file) as file:
23
+ for lib in file:
24
+ lib = lib.strip()
25
+ if not launch.is_installed(lib):
26
+ launch.run_pip(f"install {lib}", f"Deforum requirement: {lib}")
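install.py leans on the webui's `launch` helpers; a minimal standalone sketch of the same check-then-install loop, assuming (as `launch.is_installed` effectively does) that each requirement's pip package name matches its import name, which is true for this requirements.txt but not guaranteed in general:

```python
# Standalone sketch of install.py's loop, for running outside a1111.
import importlib.util
import subprocess
import sys

def ensure(lib: str) -> None:
    # Install the requirement only if it cannot already be imported.
    # Assumption: pip name == import name (e.g. pandas, rich).
    if importlib.util.find_spec(lib) is None:
        subprocess.check_call([sys.executable, "-m", "pip", "install", lib])

with open("requirements.txt") as file:
    for lib in file:
        lib = lib.strip()
        if lib:
            ensure(lib)
```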
extensions-builtin/sd-webui-deforum/javascript/deforum-hints.js ADDED
@@ -0,0 +1,232 @@
+ /*
+ # Copyright (C) 2023 Deforum LLC
+ #
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU Affero General Public License as published by
+ # the Free Software Foundation, version 3 of the License.
+ #
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ # GNU General Public License for more details.
+ #
+ # You should have received a copy of the GNU Affero General Public License
+ # along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+ Contact the authors: https://deforum.github.io/
+ */
+
+ // mouseover tooltips for various UI elements
+
+ deforum_titles = {
+     //Run
+     "Override settings": "specify a custom settings file and ignore settings displayed in the interface",
+     "Custom settings file": "the path to a custom settings file",
+     "Width": "The width of the output images, in pixels (must be a multiple of 64)",
+     "Height": "The height of the output images, in pixels (must be a multiple of 64)",
+     "Restore faces": "Restore low quality faces using GFPGAN neural network",
+     "Tiling": "Produce an image that can be tiled.",
+     "Highres. fix": "Use a two-step process to partially create an image at smaller resolution, upscale, and then improve details in it without changing composition",
+     "Seed": "A value that determines the output of the random number generator - if you create an image with the same parameters and seed as another image, you'll get the same result",
+     "Sampler": "Which algorithm to use to produce the image",
+     "Enable extras": "enable additional seed settings",
+     "Subseed": "Seed of a different picture to be mixed into the generation.",
+     "Subseed strength": "How strong of a variation to produce. At 0, there will be no effect. At 1, you will get the complete picture with variation seed (except for ancestral samplers, where you will just get something).",
+     "Resize seed from width": "Normally, changing the resolution will completely change an image, even when using the same seed. If you generated an image with a particular seed and then changed the resolution, put the original resolution here to get an image that more closely resembles the original",
+     "Resize seed from height": "Normally, changing the resolution will completely change an image, even when using the same seed. If you generated an image with a particular seed and then changed the resolution, put the original resolution here to get an image that more closely resembles the original",
+     "Steps": "How many times to improve the generated image iteratively; higher values take longer; very low values can produce bad results",
+     "Batch name": "output images will be placed in a folder with this name ({timestring} token will be replaced) inside the img2img output folder. Supports placeholders like {seed}, {w}, {h}, {prompts} and more",
+     "Pix2Pix img CFG schedule": "*Only in use with pix2pix checkpoints!*",
+     "Filename format": "specify the format of the filename for output images",
+     "Seed behavior": "defines the seed behavior that is used for animations",
+     "iter": "the seed value will increment by 1 for each subsequent frame of the animation",
+     "fixed": "the seed will remain fixed across all frames of animation. **NOT RECOMMENDED.** Unless you know what you are doing, it will *deep fry* the pictures over time",
+     "random": "a random seed will be used on each frame of the animation",
+     "schedule": "specify your own seed schedule",
+     "Seed iter N": "controls for how many frames the same seed should stick before iterating to the next one",
+     //Keyframes
+     "Animation mode": "selects the type of animation",
+     "2D": "only 2D motion parameters will be used, but this mode uses the least amount of VRAM. You can optionally enable flip_2d_perspective to enable some pseudo-3d animation parameters while in 2D mode.",
+     "3D": "enables all 3D motion parameters.",
+     "Video Input": "will ignore all motion parameters and attempt to reference a video loaded into the runtime, specified by the video_init_path. Max_frames is ignored during video_input mode, and instead follows the number of frames pulled from the video’s length. Resume_from_timestring is NOT available with Video_Input mode.",
+     "Max frames": "the maximum number of output images to be created",
+     "Border": "controls the handling method of pixels to be generated when the image is smaller than the frame.",
+     "wrap": "pulls pixels from the opposite edge of the image",
+     "replicate": "repeats the edge of the pixels, and extends them. Animations with quick motion may yield lines where this border function was attempting to populate pixels into the empty space created.",
+     "Zoom": "2D operator that scales the canvas size, multiplicatively. [static = 1.0]",
+     "Angle": "2D operator to rotate canvas clockwise/anticlockwise in degrees per frame",
+     "Transform Center X": "x center axis for 2D angle/zoom *only*",
+     "Transform Center Y": "y center axis for 2D angle/zoom *only*",
+     "Translation X": "2D & 3D operator to move canvas left/right in pixels per frame",
+     "Translation Y": "2D & 3D operator to move canvas up/down in pixels per frame",
+     "Translation Z": "3D operator to move canvas towards/away from view [speed set by FOV]",
+     "Rotation 3D X": "3D operator to tilt canvas up/down in degrees per frame",
+     "Rotation 3D Y": "3D operator to pan canvas left/right in degrees per frame",
+     "Rotation 3D Z": "3D operator to roll canvas clockwise/anticlockwise",
+     "Enable perspective flip": "enables 2D mode functions to simulate faux 3D movement",
+     "Perspective flip theta": "the roll effect angle",
+     "Perspective flip phi": "the tilt effect angle",
+     "Perspective flip gamma": "the pan effect angle",
+     "Perspective flip fv": "the 2D vanishing point of perspective (recommended range 30-160)",
+     "Noise schedule": "amount of graininess to add per frame for diffusion diversity",
+     "Strength schedule": "amount of presence of previous frame to influence next frame, also controls steps in the following formula [steps - (strength_schedule * steps)]",
+     "Sampler schedule": "controls which sampler to use at a specific scheduled frame",
+     "Contrast schedule": "adjusts the overall contrast per frame [default neutral at 1.0]",
+     "CFG scale schedule": "how closely the image should conform to the prompt. Lower values produce more creative results. (recommended range 5-15)",
+     "FOV schedule": "adjusts the scale at which the canvas is moved in 3D by the translation_z value. [maximum range -180 to +180, with 0 being undefined. Values closer to 180 will make the image have less depth, while values closer to 0 will allow more depth]",
+     "Aspect Ratio schedule": "adjusts the aspect ratio for the depth calculation (normally 1)",
+     //"near_schedule": "",
+     //"far_schedule": "",
+     "Seed schedule": "allows you to specify seeds at a specific schedule, if seed_behavior is set to schedule.",
+     "Color coherence": "The color coherence will attempt to sample the overall pixel color information, and trend those values analyzed in the first frame to be applied to future frames.",
+     // "None": "Disable color coherence",
+     "HSV": "HSV is a good method for balancing presence of vibrant colors, but may produce unrealistic results - (i.e. blue apples)",
+     "LAB": "LAB is a more linear approach to mimic human perception of color space - a good default setting for most users.",
+     "RGB": "RGB is good for enforcing unbiased amounts of color in each red, green and blue channel - some images may yield colorized artifacts if sampling is too low.",
+     "Legacy colormatch": "applies the colormatch only before the video noising, resulting in graying the video over time, use it for backwards compatibility",
+     "Cadence": "A setting of 1 will cause every frame to receive diffusion in the sequence of image outputs. A setting of 2 will only diffuse on every other frame, yet motion will still be in effect. The output of images during the cadence sequence will be automatically blended, additively, and saved to the specified drive. This may improve the illusion of coherence in some workflows as the content and context of an image will not change or diffuse during frames that were skipped. Higher values of 4-8 cadence will skip over a larger amount of frames and only diffuse the “Nth” frame as set by the diffusion_cadence value. This may produce more continuity in an animation, at the cost of little opportunity to add more diffused content. In extreme examples, motion within a frame will fail to produce diverse prompt context, and the space will be filled with lines or approximations of content - resulting in unexpected animation patterns and artifacts. Video Input & Interpolation modes are not affected by diffusion_cadence.",
+     "Optical flow cadence": "Optional method for optical flow used to blend frames during cadence in 3D animation mode (if cadence is more than 1).",
+     "Optical flow redo generation": "This option takes twice as long because it generates twice in order to capture the optical flow from the previous image to the first generation, then warps the previous image and redoes the generation. Works in 2D/3D animation modes.",
+     "Redo": "Diffusion Redo. This option renders N times before the final render. It is suggested to lower your steps if you up your redo. Seed is randomized during redo generations and restored afterwards.",
+     "Noise type": "Selects the type of noise being added to each frame",
+     "uniform": "Uniform noise covers the entire frame. It somewhat flattens and sharpens the video over time, but may be good for a cartoonish look. This is the old default setting.",
+     "perlin": "Perlin noise is a more natural looking noise. It is heterogeneous and less sharp than uniform noise, this way it is more likely that new details will appear in a more coherent way. This is the new default setting.",
+     "Perlin W": "The width of the Perlin sample. Lower values will make larger noise regions. Think of it as inverse brush stroke width. The greater this setting, the smaller details it will affect.",
+     "Perlin H": "The height of the Perlin sample. Lower values will make larger noise regions. Think of it as inverse brush stroke width. The greater this setting, the smaller details it will affect.",
+     "Perlin octaves": "The number of Perlin noise octaves, that is the count of P-noise iterations. Higher values will make the noise more soft and smoke-like, whereas lower values will make it look more organic and spotty. It is limited to 8 octaves, as the resulting gain will run out of bounds.",
+     "Perlin persistence": "How much of noise from each octave is added on each iteration. Higher values will make it straighter and sharper, while lower values will make it rounder and smoother. It is limited to 1.0, as the resulting gain would fill the frame completely with noise.",
+     "Use depth warping": "enables instructions to warp an image dynamically in 3D mode only.",
+     "MiDaS weight": "sets a midpoint at which a depthmap is to be drawn: range [-1 to +1]",
+     "Padding mode": "instructs the handling of pixels outside the field of view as they come into the scene.",
+     //"border": "Border will attempt to use the edges of the canvas as the pixels to be drawn", //duplicate name as another property
+     "reflection": "reflection will attempt to approximate the image and tile/repeat pixels",
+     "zeros": "zeros will not add any new pixel information",
+     "Sampling Mode": "choose from Bicubic, Bilinear or Nearest modes. (Recommended: Bicubic)",
+     "Save depth maps": "will output a greyscale depth map image alongside the output images.",
+
+     // Prompts
+     "Prompts": "prompts for your animation in a JSON format. Use --neg words to add 'words' as a negative prompt",
+     "Prompts positive": "positive prompt to be appended to *all* prompts",
+     "Prompts negative": "negative prompt to be appended to *all* prompts. DON'T use --neg here!",
+
+     //Init
+     "Use init": "Diffuse the first frame based on an image, similar to img2img.",
+     "Strength": "Controls the strength of the diffusion on the init image. 0 = disabled",
+     "Strength 0 no init": "Set the strength to 0 automatically when no init image is used",
+     "Init image": "the path to your init image",
+     "Use mask": "Use a grayscale image as a mask on your init image. Whiter areas of the mask are areas that change more.",
+     "Use alpha as mask": "use the alpha channel of the init image as the mask",
+     "Mask file": "the path to your mask image",
+     "Invert mask": "Inverts the colors of the mask",
+     "Mask brightness adjust": "adjust the brightness of the mask. Should be a positive number, with 1.0 meaning no adjustment.",
+     "Mask contrast adjust": "adjust the contrast of the mask. Should be a positive number, with 1.0 meaning no adjustment.",
+     "overlay mask": "Overlay the masked image at the end of the generation so it does not get degraded by encoding and decoding",
+     "Mask overlay blur": "Blur edges of final overlay mask, if used. Minimum = 0 (no blur)",
+     "Video init path": "the directory / URL at which your video file is located, for Video Input mode only",
+     "Extract nth frame": "during the run sequence, only frames specified by this value will be extracted, saved, and diffused upon. A value of 1 indicates that every frame is to be accounted for. Values of 2 will use every other frame for the sequence. Higher values will skip that number of frames respectively.",
+     "Extract from frame": "start extracting the input video only from this frame number",
+     "Extract to frame": "stop the extraction of the video at this frame number. -1 for no limits",
+     "Overwrite extracted frames": "when enabled, will re-extract video frames each run. When using video_input mode, the run will be instructed to write video frames to the drive. If you’ve already populated the frames needed, uncheck this box to skip past redundant extraction, and immediately start the render. If you have not extracted frames, you must run at least once with this box checked to write the necessary frames.",
+     "Use mask video": "video_input mode only, enables the extraction and use of a separate video file intended for use as a mask. White areas of the extracted video frames will not be affected by diffusion, while black areas will be fully affected. Lighter/darker areas are affected dynamically.",
+     "Video mask path": "the directory in which your mask video is located.",
+     "Interpolate key frames": "selects whether to ignore the prompt schedule or _x_frames.",
+     "Interpolate x frames": "the number of frames to transition through between prompts (when interpolate_key_frames = true, the numbers in front of the animation prompts will dynamically guide the images based on their value. If set to false, it will ignore the prompt numbers and force the interpolate_x_frames value regardless of prompt number)",
+     "Resume from timestring": "instructs the run to start from a specified point",
+     "Resume timestring": "the required timestamp to reference when resuming. Currently only available in 2D & 3D mode, the timestamp is saved as the settings .txt file name as well as images produced during your previous run. The format follows: yyyymmddhhmmss - a timestamp of when the run was started to diffuse.",
+
+     //Video Output
+     "Skip video creation": "when checked, do not output a video",
+     "Make GIF": "create a gif in addition to the .mp4 file. Supports up to 30 fps, will self-disable at higher fps values",
+     "Upscale": "upscale the images of the next run once it's finished + make a video out of them",
+     "Upscale model": "model of the upscaler to use. 'realesr-animevideov3' is much faster but yields smoother, less detailed results. The other models only do x4",
+     "Upscale factor": "how many times to upscale, actual options depend on the chosen upscale model",
+     "FPS": "The frames per second that the video will run at",
+     "Output format": "select the type of video file to output",
+     "PIL gif": "create an animated GIF",
+     "FFMPEG mp4": "create an MP4 video file",
+     "FFmpeg location": "the path to where ffmpeg is located. Leave at default 'ffmpeg' if ffmpeg is in your PATH!",
+     "FFmpeg crf": "controls quality where lower is better, less compressed. values: 0 to 51, default 17",
+     "FFmpeg preset": "controls how good the compression is, and the operation speed. If you're not in a rush keep it at 'veryslow'",
+     "Add soundtrack": "when this box is checked, and FFMPEG mp4 is selected as the output format, an audio file will be multiplexed with the video.",
+     "Soundtrack path": "the path / URL to an audio file to accompany the video",
+     "Use manual settings": "when this is unchecked, the video will automatically be created in the same output folder as the images. Check this box to specify different settings for the creation of the video, specified by the following options",
+     "Render steps": "render each step of diffusion as a separate frame",
+     "Max video frames": "the maximum number of frames to include in the video, when use_manual_settings is checked",
+     "Image path": "the location of images to create the video from, when use_manual_settings is checked",
+     "MP4 path": "the output location of the mp4 file, when use_manual_settings is checked",
+     "Delete Imgs": "if enabled, raw imgs will be deleted after successful video creation (upscaling, interpolation, gif)",
+     "Engine": "choose the frame interpolation engine and version",
+     "Interp X": "how many times to interpolate the source video. e.g. a source video fps of 12 and a value of x2 will yield a 24fps interpolated video",
+     "Slow-Mo X": "how many times to slow down the video. *Naturally affects output fps as well",
+     "Keep Imgs": "delete or keep raw affected (interpolated/upscaled, depending on the UI section) png imgs",
+     "Interpolate an existing video": "This feature allows you to interpolate any video with a dedicated button. The video can be completely unrelated to Deforum",
+     "In Frame Count": "uploaded video total frame count",
+     "In FPS": "uploaded video FPS",
+     "Interpolated Vid FPS": "calculated output-interpolated video FPS",
+     "In Res": "uploaded video resolution",
+     "Out Res": "output video resolution",
+
+     // Looper Args
+     // "use_looper": "",
+     "Enable guided images mode": "check this box to enable guided images mode",
+     "Images to use for keyframe guidance": "images you iterate over; you can use local or web paths (no single backslashes!)",
+     "Image strength schedule": "how much the image should look like the previous one and the new image frame init. Strength schedule might be better if this is higher, around 0.75 during the keyframes you want to switch on",
+     "Blend factor max": "blendFactor = blendFactorMax - blendFactorSlope * cos((frame % tweening_frames_schedule) / (tweening_frames_schedule / 2))",
+     "Blend factor slope": "blendFactor = blendFactorMax - blendFactorSlope * cos((frame % tweening_frames_schedule) / (tweening_frames_schedule / 2))",
+     "Tweening frames schedule": "number of the frames that we will blend between the current imagined image and the input frame image",
+     "Color correction factor": "how close to get to the colors of the input frame image / the amount each frame during a tweening step uses the new image's colors",
+     // deforum.py / right side of the ui:
+     "Settings File": "Path to the settings file you want to load. Path can be relative to the webui folder OR full - absolute",
+
+     // Hybrid Video
+     "Generate inputframes": "Initiates extraction of video frames from your video_init_path to the inputframes folder. You only need to do this once and then you can change it to False and re-render",
+     "Hybrid composite": "Engages hybrid compositing of video into animation in various ways with comp alpha as a master mix control.",
+     "Use init image as video": "Use init image instead of video. Doesn't require generation of inputframes.",
+     "First Frame as init image": "If True, uses the first frame of the video as the init_image. False can create interesting transition effects into the video, depending on settings.",
+     "Motion use prev img": "If enabled, changes the behavior of hybrid_motion to capture motion by comparing the current video frame to the previous rendered image, instead of the previous video frame.",
+     "Hybrid motion": "Analyzes video frames for camera motion and applies movement to the render.",
+     "Flow method": "Selects the type of Optical Flow to use if Optical Flow is selected in Hybrid motion.",
+     "Comp mask type": "You don't need a mask to composite video. But, mask types can control the way that video is composited with the previous image each frame.",
+     "Comp mask equalize": "Equalizes the mask for the composite before or after the autocontrast operation (or both)",
+     "Comp mask auto contrast": "Auto-contrasts the mask for the composite. If enabled, uses the low/high autocontrast cutoff schedules.",
+     "Comp mask inverse": "Inverts the composite mask.",
+     "Comp save extra frames": "If this option is selected, many extra frames will be output for the various processes into the hybridframes folder.",
+     "Comp alpha schedule": "Schedule controls how much the composite video is mixed in, whether the mask is set to None or using a mask. This is the master mix.",
+     "Flow factor schedule": "Affects optical flow hybrid motion. 1 is normal flow. -1 is negative flow. 0.5 is half flow, etc...",
+     "Comp mask blend alpha schedule": "If using a blend mask, this controls the blend amount of the video and render for the composite mask.",
+     "Comp mask contrast schedule": "Controls the contrast of the composite mask. 0.5 is half, 1 is normal contrast, 2 is double, etc.",
+     "Comp mask auto contrast cutoff high schedule": "If using the autocontrast option, this is the high cutoff for the operation.",
+     "Comp mask auto contrast cutoff low schedule": "If using the autocontrast option, this is the low cutoff for the operation.",
+     "Generate human masks": "This will generate masks of all the humans in a video. Created at generation of hybrid video. Not yet integrated for auto-masking, but it will create the masks, and you can then use the mask video manually.",
+ }
+
+ onUiUpdate(function(){
+     gradioApp().querySelectorAll('span, button, select, p').forEach(function(span){
+         let tooltip = deforum_titles[span.textContent];
+
+         if(!tooltip){
+             tooltip = deforum_titles[span.value];
+         }
+
+         if(!tooltip){
+             for (const c of span.classList) {
+                 if (c in deforum_titles) {
+                     tooltip = deforum_titles[c];
+                     break;
+                 }
+             }
+         }
+
+         if(tooltip){
+             span.title = tooltip;
+         }
+     })
+
+     gradioApp().querySelectorAll('select').forEach(function(select){
+         if (select.onchange != null) return;
+
+         select.onchange = function(){
+             select.title = deforum_titles[select.value] || "";
+         }
+     })
+ })
extensions-builtin/sd-webui-deforum/javascript/deforum.js ADDED
@@ -0,0 +1,33 @@
+ /*
+ # Copyright (C) 2023 Deforum LLC
+ #
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU Affero General Public License as published by
+ # the Free Software Foundation, version 3 of the License.
+ #
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ # GNU General Public License for more details.
+ #
+ # You should have received a copy of the GNU Affero General Public License
+ # along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+ Contact the authors: https://deforum.github.io/
+ */
+
+ function submit_deforum(){
+     rememberGallerySelection('deforum_gallery')
+     showSubmitButtons('deforum', false)
+
+     var id = randomId()
+     requestProgress(id, gradioApp().getElementById('deforum_gallery_container'), gradioApp().getElementById('deforum_gallery'), function(){
+         showSubmitButtons('deforum', true)
+     })
+
+     var res = create_submit_args(arguments)
+
+     res[0] = id
+
+     return res
+ }
@@ -0,0 +1,42 @@
1
+ # 'Deforum' plugin for Automatic1111's Stable Diffusion WebUI.
2
+ # Copyright (C) 2023 Deforum LLC
3
+ #
4
+ # This program is free software: you can redistribute it and/or modify
5
+ # it under the terms of the GNU Affero General Public License as published by
6
+ # the Free Software Foundation, version 3 of the License.
7
+ #
8
+ # This program is distributed in the hope that it will be useful,
9
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
10
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11
+ # GNU General Public License for more details.
12
+ #
13
+ # You should have received a copy of the GNU Affero General Public License
14
+ # along with this program. If not, see <https://www.gnu.org/licenses/>.
15
+
16
+ # Contact the authors: https://deforum.github.io/
17
+
18
+ def preload(parser):
19
+ parser.add_argument(
20
+ "--deforum-api",
21
+ action="store_true",
22
+ help="Enable the Deforum API",
23
+ default=None,
24
+ )
25
+ parser.add_argument(
26
+ "--deforum-simple-api",
27
+ action="store_true",
28
+ help="Enable the simplified version of Deforum API",
29
+ default=None,
30
+ )
31
+ parser.add_argument(
32
+ "--deforum-run-now",
33
+ type=str,
34
+ help="Comma-delimited list of deforum settings files to run immediately on startup",
35
+ default=None,
36
+ )
37
+ parser.add_argument(
38
+ "--deforum-terminate-after-run-now",
39
+ action="store_true",
40
+ help="Whether to shut down the a1111 process immediately after completing the generations passed in to '--deforum-run-now'.",
41
+ default=None,
42
+ )
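+
+ # Example launch flags (a sketch; the exact invocation depends on how you start the webui):
+ #   python launch.py --deforum-api
+ #   python launch.py --deforum-run-now "a.txt,b.txt" --deforum-terminate-after-run-now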
extensions-builtin/sd-webui-deforum/pytest.ini ADDED
@@ -0,0 +1,3 @@
1
+ [pytest]
2
+ filterwarnings =
3
+ ignore::DeprecationWarning
extensions-builtin/sd-webui-deforum/requirements-dev.txt ADDED
@@ -0,0 +1,6 @@
1
+ coverage
2
+ syrupy
3
+ pytest
4
+ tenacity
5
+ pydantic_requests
6
+ moviepy
extensions-builtin/sd-webui-deforum/requirements.txt ADDED
@@ -0,0 +1,8 @@
1
+ numexpr
2
+ matplotlib
3
+ pandas
4
+ av
5
+ pims
6
+ imageio_ffmpeg
7
+ rich
8
+ gdown
extensions-builtin/sd-webui-deforum/scripts/default_settings.txt ADDED
@@ -0,0 +1,259 @@
1
+ {
2
+ "W": 512,
3
+ "H": 512,
4
+ "show_info_on_ui": true,
5
+ "tiling": false,
6
+ "restore_faces": false,
7
+ "seed_resize_from_w": 0,
8
+ "seed_resize_from_h": 0,
9
+ "seed": -1,
10
+ "sampler": "Euler a",
11
+ "steps": 25,
12
+ "batch_name": "Deforum_20230812221310",
13
+ "seed_behavior": "iter",
14
+ "seed_iter_N": 1,
15
+ "use_init": false,
16
+ "strength": 0.8,
17
+ "strength_0_no_init": true,
18
+ "init_image": null,
19
+ "use_mask": false,
20
+ "use_alpha_as_mask": false,
21
+ "mask_file": "https://deforum.github.io/a1/M1.jpg",
22
+ "invert_mask": false,
23
+ "mask_contrast_adjust": 1.0,
24
+ "mask_brightness_adjust": 1.0,
25
+ "overlay_mask": true,
26
+ "mask_overlay_blur": 4,
27
+ "fill": 0,
28
+ "full_res_mask": true,
29
+ "full_res_mask_padding": 4,
30
+ "reroll_blank_frames": "ignore",
31
+ "reroll_patience": 10.0,
32
+ "motion_preview_mode": false,
33
+ "prompts": {
34
+ "0": " tiny cute swamp bunny, highly detailed, intricate, ultra hd, sharp photo, crepuscular rays, in focus, 4k, landscape --neg nsfw, nude",
35
+ "30": " anthropomorphic clean cat, surrounded by mandelbulb fractals, epic angle and pose, symmetrical, 3d, depth of field --neg nsfw, nude",
36
+ "60": " a beautiful coconut --neg photo, realistic nsfw, nude",
37
+ "90": " a beautiful durian, amazing award winning photography --neg nsfw, nude"
38
+ },
39
+ "positive_prompts": "",
40
+ "negative_prompts": "nsfw, nude",
41
+ "animation_mode": "3D",
42
+ "max_frames": 120,
43
+ "border": "replicate",
44
+ "angle": "0: (0)",
45
+ "zoom": "0: (1.0025+0.002*sin(1.25*3.14*t/30))",
46
+ "translation_x": "0: (0)",
47
+ "translation_y": "0: (0)",
48
+ "translation_z": "0: (1.75)",
49
+ "transform_center_x": "0: (0.5)",
50
+ "transform_center_y": "0: (0.5)",
51
+ "rotation_3d_x": "0: (0)",
52
+ "rotation_3d_y": "0: (0)",
53
+ "rotation_3d_z": "0: (0)",
54
+ "enable_perspective_flip": false,
55
+ "perspective_flip_theta": "0: (0)",
56
+ "perspective_flip_phi": "0: (0)",
57
+ "perspective_flip_gamma": "0: (0)",
58
+ "perspective_flip_fv": "0: (53)",
59
+ "noise_schedule": "0: (0.065)",
60
+ "strength_schedule": "0: (0.65)",
61
+ "contrast_schedule": "0: (1.0)",
62
+ "cfg_scale_schedule": "0: (7)",
63
+ "enable_steps_scheduling": false,
64
+ "steps_schedule": "0: (25)",
65
+ "fov_schedule": "0: (70)",
66
+ "aspect_ratio_schedule": "0: (1)",
67
+ "aspect_ratio_use_old_formula": false,
68
+ "near_schedule": "0: (200)",
69
+ "far_schedule": "0: (10000)",
70
+ "seed_schedule": "0:(s), 1:(-1), \"max_f-2\":(-1), \"max_f-1\":(s)",
71
+ "pix2pix_img_cfg_scale_schedule": "0:(1.5)",
72
+ "enable_subseed_scheduling": false,
73
+ "subseed_schedule": "0: (1)",
74
+ "subseed_strength_schedule": "0: (0)",
75
+ "enable_sampler_scheduling": false,
76
+ "sampler_schedule": "0: (\"Euler a\")",
77
+ "use_noise_mask": false,
78
+ "mask_schedule": "0: (\"{video_mask}\")",
79
+ "noise_mask_schedule": "0: (\"{video_mask}\")",
80
+ "enable_checkpoint_scheduling": false,
81
+ "checkpoint_schedule": "0: (\"model1.ckpt\"), 100: (\"model2.safetensors\")",
82
+ "enable_clipskip_scheduling": false,
83
+ "clipskip_schedule": "0: (2)",
84
+ "enable_noise_multiplier_scheduling": true,
85
+ "noise_multiplier_schedule": "0: (1.05)",
86
+ "resume_from_timestring": false,
87
+ "resume_timestring": "20230129210106",
88
+ "enable_ddim_eta_scheduling": false,
89
+ "ddim_eta_schedule": "0: (0)",
90
+ "enable_ancestral_eta_scheduling": false,
91
+ "ancestral_eta_schedule": "0: (1)",
92
+ "amount_schedule": "0: (0.1)",
93
+ "kernel_schedule": "0: (5)",
94
+ "sigma_schedule": "0: (1)",
95
+ "threshold_schedule": "0: (0)",
96
+ "color_coherence": "LAB",
97
+ "color_coherence_image_path": "",
98
+ "color_coherence_video_every_N_frames": 1,
99
+ "color_force_grayscale": false,
100
+ "legacy_colormatch": false,
101
+ "diffusion_cadence": 2,
102
+ "optical_flow_cadence": "None",
103
+ "cadence_flow_factor_schedule": "0: (1)",
104
+ "optical_flow_redo_generation": "None",
105
+ "redo_flow_factor_schedule": "0: (1)",
106
+ "diffusion_redo": "0",
107
+ "noise_type": "perlin",
108
+ "perlin_octaves": 4,
109
+ "perlin_persistence": 0.5,
110
+ "use_depth_warping": true,
111
+ "depth_algorithm": "Midas-3-Hybrid",
112
+ "midas_weight": 0.2,
113
+ "padding_mode": "border",
114
+ "sampling_mode": "bicubic",
115
+ "save_depth_maps": false,
116
+ "video_init_path": "https://deforum.github.io/a1/V1.mp4",
117
+ "extract_nth_frame": 1,
118
+ "extract_from_frame": 0,
119
+ "extract_to_frame": -1,
120
+ "overwrite_extracted_frames": false,
121
+ "use_mask_video": false,
122
+ "video_mask_path": "https://deforum.github.io/a1/VM1.mp4",
123
+ "hybrid_comp_alpha_schedule": "0:(0.5)",
124
+ "hybrid_comp_mask_blend_alpha_schedule": "0:(0.5)",
125
+ "hybrid_comp_mask_contrast_schedule": "0:(1)",
126
+ "hybrid_comp_mask_auto_contrast_cutoff_high_schedule": "0:(100)",
127
+ "hybrid_comp_mask_auto_contrast_cutoff_low_schedule": "0:(0)",
128
+ "hybrid_flow_factor_schedule": "0:(1)",
129
+ "hybrid_generate_inputframes": false,
130
+ "hybrid_generate_human_masks": "None",
131
+ "hybrid_use_first_frame_as_init_image": true,
132
+ "hybrid_motion": "None",
133
+ "hybrid_motion_use_prev_img": false,
134
+ "hybrid_flow_consistency": false,
135
+ "hybrid_consistency_blur": 2,
136
+ "hybrid_flow_method": "RAFT",
137
+ "hybrid_composite": "None",
138
+ "hybrid_use_init_image": false,
139
+ "hybrid_comp_mask_type": "None",
140
+ "hybrid_comp_mask_inverse": false,
141
+ "hybrid_comp_mask_equalize": "None",
142
+ "hybrid_comp_mask_auto_contrast": false,
143
+ "hybrid_comp_save_extra_frames": false,
144
+ "parseq_manifest": "",
145
+ "parseq_use_deltas": true,
146
+ "use_looper": false,
147
+ "init_images": "{\n \"0\": \"https://deforum.github.io/a1/Gi1.png\",\n \"max_f/4-5\": \"https://deforum.github.io/a1/Gi2.png\",\n \"max_f/2-10\": \"https://deforum.github.io/a1/Gi3.png\",\n \"3*max_f/4-15\": \"https://deforum.github.io/a1/Gi4.jpg\",\n \"max_f-20\": \"https://deforum.github.io/a1/Gi1.png\"\n}",
148
+ "image_strength_schedule": "0:(0.75)",
149
+ "blendFactorMax": "0:(0.35)",
150
+ "blendFactorSlope": "0:(0.25)",
151
+ "tweening_frames_schedule": "0:(20)",
152
+ "color_correction_factor": "0:(0.075)",
153
+ "cn_1_overwrite_frames": true,
154
+ "cn_1_vid_path": "",
155
+ "cn_1_mask_vid_path": "",
156
+ "cn_1_enabled": false,
157
+ "cn_1_low_vram": false,
158
+ "cn_1_pixel_perfect": false,
159
+ "cn_1_module": "none",
160
+ "cn_1_model": "None",
161
+ "cn_1_weight": "0:(1)",
162
+ "cn_1_guidance_start": "0:(0.0)",
163
+ "cn_1_guidance_end": "0:(1.0)",
164
+ "cn_1_processor_res": 64,
165
+ "cn_1_threshold_a": 64,
166
+ "cn_1_threshold_b": 64,
167
+ "cn_1_resize_mode": "Inner Fit (Scale to Fit)",
168
+ "cn_1_control_mode": "Balanced",
169
+ "cn_1_loopback_mode": false,
170
+ "cn_2_overwrite_frames": true,
171
+ "cn_2_vid_path": "",
172
+ "cn_2_mask_vid_path": "",
173
+ "cn_2_enabled": false,
174
+ "cn_2_low_vram": false,
175
+ "cn_2_pixel_perfect": false,
176
+ "cn_2_module": "none",
177
+ "cn_2_model": "None",
178
+ "cn_2_weight": "0:(1)",
179
+ "cn_2_guidance_start": "0:(0.0)",
180
+ "cn_2_guidance_end": "0:(1.0)",
181
+ "cn_2_processor_res": 64,
182
+ "cn_2_threshold_a": 64,
183
+ "cn_2_threshold_b": 64,
184
+ "cn_2_resize_mode": "Inner Fit (Scale to Fit)",
185
+ "cn_2_control_mode": "Balanced",
186
+ "cn_2_loopback_mode": false,
187
+ "cn_3_overwrite_frames": true,
188
+ "cn_3_vid_path": "",
189
+ "cn_3_mask_vid_path": "",
190
+ "cn_3_enabled": false,
191
+ "cn_3_low_vram": false,
192
+ "cn_3_pixel_perfect": false,
193
+ "cn_3_module": "none",
194
+ "cn_3_model": "None",
195
+ "cn_3_weight": "0:(1)",
196
+ "cn_3_guidance_start": "0:(0.0)",
197
+ "cn_3_guidance_end": "0:(1.0)",
198
+ "cn_3_processor_res": 64,
199
+ "cn_3_threshold_a": 64,
200
+ "cn_3_threshold_b": 64,
201
+ "cn_3_resize_mode": "Inner Fit (Scale to Fit)",
202
+ "cn_3_control_mode": "Balanced",
203
+ "cn_3_loopback_mode": false,
204
+ "cn_4_overwrite_frames": true,
205
+ "cn_4_vid_path": "",
206
+ "cn_4_mask_vid_path": "",
207
+ "cn_4_enabled": false,
208
+ "cn_4_low_vram": false,
209
+ "cn_4_pixel_perfect": false,
210
+ "cn_4_module": "none",
211
+ "cn_4_model": "None",
212
+ "cn_4_weight": "0:(1)",
213
+ "cn_4_guidance_start": "0:(0.0)",
214
+ "cn_4_guidance_end": "0:(1.0)",
215
+ "cn_4_processor_res": 64,
216
+ "cn_4_threshold_a": 64,
217
+ "cn_4_threshold_b": 64,
218
+ "cn_4_resize_mode": "Inner Fit (Scale to Fit)",
219
+ "cn_4_control_mode": "Balanced",
220
+ "cn_4_loopback_mode": false,
221
+ "cn_5_overwrite_frames": true,
222
+ "cn_5_vid_path": "",
223
+ "cn_5_mask_vid_path": "",
224
+ "cn_5_enabled": false,
225
+ "cn_5_low_vram": false,
226
+ "cn_5_pixel_perfect": false,
227
+ "cn_5_module": "none",
228
+ "cn_5_model": "None",
229
+ "cn_5_weight": "0:(1)",
230
+ "cn_5_guidance_start": "0:(0.0)",
231
+ "cn_5_guidance_end": "0:(1.0)",
232
+ "cn_5_processor_res": 64,
233
+ "cn_5_threshold_a": 64,
234
+ "cn_5_threshold_b": 64,
235
+ "cn_5_resize_mode": "Inner Fit (Scale to Fit)",
236
+ "cn_5_control_mode": "Balanced",
237
+ "cn_5_loopback_mode": false,
238
+ "skip_video_creation": false,
239
+ "fps": 15,
240
+ "make_gif": false,
241
+ "delete_imgs": false,
242
+ "delete_input_frames": false,
243
+ "add_soundtrack": "None",
244
+ "soundtrack_path": "https://deforum.github.io/a1/A1.mp3",
245
+ "r_upscale_video": false,
246
+ "r_upscale_factor": "x2",
247
+ "r_upscale_model": "realesr-animevideov3",
248
+ "r_upscale_keep_imgs": true,
249
+ "store_frames_in_ram": false,
250
+ "frame_interpolation_engine": "None",
251
+ "frame_interpolation_x_amount": 2,
252
+ "frame_interpolation_slow_mo_enabled": false,
253
+ "frame_interpolation_slow_mo_amount": 2,
254
+ "frame_interpolation_keep_imgs": true,
255
+ "frame_interpolation_use_upscaled": false,
256
+ "sd_model_name": "revAnimated_v122.safetensors",
257
+ "sd_model_hash": "3f4fefd9",
258
+ "deforum_git_commit_id": "eb16c856"
259
+ }
extensions-builtin/sd-webui-deforum/scripts/deforum.py ADDED
@@ -0,0 +1,42 @@
1
+ # Copyright (C) 2023 Deforum LLC
2
+ #
3
+ # This program is free software: you can redistribute it and/or modify
4
+ # it under the terms of the GNU Affero General Public License as published by
5
+ # the Free Software Foundation, version 3 of the License.
6
+ #
7
+ # This program is distributed in the hope that it will be useful,
8
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
9
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10
+ # GNU General Public License for more details.
11
+ #
12
+ # You should have received a copy of the GNU Affero General Public License
13
+ # along with this program. If not, see <https://www.gnu.org/licenses/>.
14
+
15
+ # Contact the authors: https://deforum.github.io/
16
+
17
+ import os
18
+
19
+ import modules.paths as ph
20
+ from modules import script_callbacks
21
+ from modules.shared import cmd_opts
22
+ from scripts.deforum_extend_paths import deforum_sys_extend
23
+
24
+
25
+ def init_deforum():
26
+ # use sys.path.extend to make sure all of our files are available for import
27
+ deforum_sys_extend()
28
+
29
+ # create the Models/Deforum folder, where many of the Deforum-related models/packages will be downloaded
30
+ os.makedirs(ph.models_path + '/Deforum', exist_ok=True)
31
+
32
+ # import our on_ui_tabs and on_ui_settings functions from their respective files
33
+ from deforum_helpers.ui_right import on_ui_tabs
34
+ from deforum_helpers.ui_settings import on_ui_settings
35
+
36
+ # trigger webui's extensions mechanism using our imported main functions -
37
+ # first to create the actual deforum gui, then to make the deforum tab in webui's settings section
38
+ script_callbacks.on_ui_tabs(on_ui_tabs)
39
+ script_callbacks.on_ui_settings(on_ui_settings)
40
+
41
+ init_deforum()
42
+
extensions-builtin/sd-webui-deforum/scripts/deforum_api.py ADDED
@@ -0,0 +1,485 @@
1
+ # Copyright (C) 2023 Deforum LLC
2
+ #
3
+ # This program is free software: you can redistribute it and/or modify
4
+ # it under the terms of the GNU Affero General Public License as published by
5
+ # the Free Software Foundation, version 3 of the License.
6
+ #
7
+ # This program is distributed in the hope that it will be useful,
8
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
9
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10
+ # GNU General Public License for more details.
11
+ #
12
+ # You should have received a copy of the GNU Affero General Public License
13
+ # along with this program. If not, see <https://www.gnu.org/licenses/>.
14
+
15
+ # Contact the authors: https://deforum.github.io/
16
+
17
+ import os
18
+ import atexit
19
+ import json
20
+ import random
21
+ import tempfile
22
+ import traceback
23
+ import logging
24
+ import threading
25
+ from concurrent.futures import ThreadPoolExecutor
26
+ from dataclasses import dataclass, replace
27
+ from datetime import datetime
28
+ from typing import Any, Dict, List
29
+ from deforum_api_models import Batch, DeforumJobErrorType, DeforumJobStatusCategory, DeforumJobPhase, DeforumJobStatus
30
+ from contextlib import contextmanager
31
+ from deforum_extend_paths import deforum_sys_extend
32
+
33
+ import gradio as gr
34
+ from deforum_helpers.args import (DeforumAnimArgs, DeforumArgs,
35
+ DeforumOutputArgs, LoopArgs, ParseqArgs,
36
+ RootArgs, get_component_names)
37
+ from fastapi import FastAPI, Response, status
38
+
39
+ from modules.shared import cmd_opts, opts, state
40
+
41
+
42
+ log = logging.getLogger(__name__)
43
+ log_level = os.environ.get("DEFORUM_API_LOG_LEVEL") or os.environ.get("SD_WEBUI_LOG_LEVEL") or "INFO"
44
+ log.setLevel(log_level)
45
+ logging.basicConfig(
46
+ format='%(asctime)s %(levelname)s [%(name)s] %(message)s',
47
+ datefmt='%Y-%m-%d %H:%M:%S',
48
+ )
49
+
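+ # IDs follow the pattern "batch(<random int>)" for the batch and "batch(<random int>)-<i>" for each job in it.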
50
+ def make_ids(job_count: int):
51
+ batch_id = f"batch({random.randint(0, 1e9)})"
52
+ job_ids = [f"{batch_id}-{i}" for i in range(job_count)]
53
+ return [batch_id, job_ids]
54
+
55
+
56
+ def get_default_value(name:str):
57
+ allArgs = RootArgs() | DeforumAnimArgs() | DeforumArgs() | LoopArgs() | ParseqArgs() | DeforumOutputArgs()
58
+ if name in allArgs and isinstance(allArgs[name], dict):
59
+ return allArgs[name].get("value", None)
60
+ elif name in allArgs:
61
+ return allArgs[name]
62
+ else:
63
+ return None
64
+
65
+
66
+ def run_deforum_batch(batch_id: str, job_ids: List[str], deforum_settings_files: List[Any], opts_overrides: Dict[str, Any] = None):
67
+ log.info(f"Starting batch {batch_id} in thread {threading.get_ident()}.")
68
+ try:
69
+ with A1111OptionsOverrider(opts_overrides):
70
+ # Fill deforum args with default values.
71
+ # We are overriding everything with the batch files, but some values are eagerly validated, so must appear valid.
72
+ component_names = get_component_names()
73
+ prefixed_gradio_args = 2
74
+ expected_arg_count = prefixed_gradio_args + len(component_names)
75
+ run_deforum_args = [None] * expected_arg_count
76
+ for idx, name in enumerate(component_names):
77
+ run_deforum_args[prefixed_gradio_args + idx] = get_default_value(name)
78
+
79
+ # For some values, defaults don't pass validation...
80
+ run_deforum_args[prefixed_gradio_args + component_names.index('animation_prompts')] = '{"0":"dummy value"}'
81
+ run_deforum_args[prefixed_gradio_args + component_names.index('animation_prompts_negative')] = ''
82
+ run_deforum_args[prefixed_gradio_args + component_names.index('animation_prompts_positive')] = ''
83
+
84
+ # Arg 0 is a UID for the batch
85
+ run_deforum_args[0] = batch_id
86
+
87
+ # Setup batch override
88
+ run_deforum_args[prefixed_gradio_args + component_names.index('override_settings_with_file')] = True
89
+ run_deforum_args[prefixed_gradio_args + component_names.index('custom_settings_file')] = deforum_settings_files
90
+
91
+ # Cleanup old state from previously cancelled jobs
92
+ # WARNING: not thread safe because state is global. If we ever run multiple batches in parallel, this will need to be reworked.
93
+ state.skipped = False
94
+ state.interrupted = False
95
+
96
+ # Invoke deforum with appropriate args
97
+ from deforum_helpers.run_deforum import run_deforum
98
+ run_deforum(*run_deforum_args)
99
+
100
+ except Exception as e:
101
+ log.error(f"Batch {batch_id} failed: {e}")
102
+ traceback.print_exc()
103
+ for job_id in job_ids:
104
+ # Mark all jobs in this batch as failed
105
+ JobStatusTracker().fail_job(job_id, DeforumJobErrorType.TERMINAL, str(e))
106
+
107
+
108
+ # API to allow a batch of jobs to be submitted to the deforum pipeline.
109
+ # A batch is a settings object OR a list of settings objects.
110
+ # A settings object is the JSON structure you can find in your saved settings.txt files.
111
+ #
112
+ # Request format:
113
+ # {
114
+ # "deforum_settings": [
115
+ # { ... settings object ... },
116
+ # { ... settings object ... },
117
+ # ]
118
+ # }
119
+ # OR:
120
+ # {
121
+ # "deforum_settings": { ... settings object ... }
122
+ # }
123
+ #
124
+ # Each settings object in the request represents a job to run as part of the batch.
125
+ # Each submitted batch will be given a batch ID which the user can use to query the status of all jobs in the batch.
126
+ #
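+ # Example request (a sketch, assuming the webui was launched with --deforum-api on the
+ # default port 7860; the settings object here is deliberately minimal):
+ #
+ #   curl -X POST http://localhost:7860/deforum_api/batches \
+ #        -H "Content-Type: application/json" \
+ #        -d '{"deforum_settings": {"prompts": {"0": "a forest"}, "max_frames": 10}}'
+ #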
127
+ def deforum_api(_: gr.Blocks, app: FastAPI):
128
+
129
+ deforum_sys_extend()
130
+
131
+ apiState = ApiState()
132
+
133
+ # Submit a new batch
134
+ @app.post("/deforum_api/batches")
135
+ async def run_batch(batch: Batch, response: Response):
136
+
137
+ # Extract the settings files from the request
138
+ deforum_settings_data = batch.deforum_settings
139
+ if not deforum_settings_data:
140
+ response.status_code = status.HTTP_400_BAD_REQUEST
141
+ return {"message": "No settings files provided. Please provide an element 'deforum_settings' of type list in the request JSON payload."}
142
+
143
+ if not isinstance(deforum_settings_data, list):
144
+ # Allow input deforum_settings to be top-level object as well as single object list
145
+ deforum_settings_data = [deforum_settings_data]
146
+
147
+ deforum_settings_tempfiles = []
148
+ for data in deforum_settings_data:
149
+ temp_file = tempfile.NamedTemporaryFile(mode='w+t', delete=False)
150
+ json.dump(data, temp_file)
151
+ temp_file.close()
152
+ deforum_settings_tempfiles.append(temp_file)
153
+
154
+ job_count = len(deforum_settings_tempfiles)
155
+ [batch_id, job_ids] = make_ids(job_count)
156
+ apiState.submit_job(batch_id, job_ids, deforum_settings_tempfiles, batch.options_overrides)
157
+
158
+ for idx, job_id in enumerate(job_ids):
159
+ JobStatusTracker().accept_job(batch_id=batch_id, job_id=job_id, deforum_settings=deforum_settings_data[idx], options_overrides=batch.options_overrides)
160
+
161
+ response.status_code = status.HTTP_202_ACCEPTED
162
+ return {"message": "Job(s) accepted", "batch_id": batch_id, "job_ids": job_ids }
163
+
164
+ # List all batches and their job ids
165
+ @app.get("/deforum_api/batches")
166
+ async def list_batches():
167
+ return JobStatusTracker().batches
168
+
169
+ # Show the details of all jobs in a batch
170
+ @app.get("/deforum_api/batches/{id}")
171
+ async def get_batch(id: str, response: Response):
172
+ jobsForBatch = JobStatusTracker().batches.get(id)
173
+ if not jobsForBatch:
174
+ response.status_code = status.HTTP_404_NOT_FOUND
175
+ return {"id": id, "status": "NOT FOUND"}
176
+ return [JobStatusTracker().get(job_id) for job_id in jobsForBatch]
177
+
178
+ # Cancel all jobs in a batch
179
+ @app.delete("/deforum_api/batches/{id}")
180
+ async def cancel_batch(id: str, response: Response):
181
+ jobsForBatch = JobStatusTracker().batches.get(id)
182
+ cancelled_jobs = []
183
+ if not jobsForBatch:
184
+ response.status_code = status.HTTP_404_NOT_FOUND
185
+ return {"id": id, "status": "NOT FOUND"}
186
+ for job_id in jobsForBatch:
187
+ try:
188
+ cancelled = _cancel_job(job_id)
189
+ if cancelled:
190
+ cancelled_jobs.append(job_id)
191
+ except Exception:
192
+ log.warning(f"Failed to cancel job {job_id}")
193
+
194
+ return {"ids": cancelled_jobs, "message:": f"{len(cancelled_jobs)} job(s) cancelled." }
195
+
196
+ # Show details of all jobs across all batches
197
+ @app.get("/deforum_api/jobs")
198
+ async def list_jobs():
199
+ return JobStatusTracker().statuses
200
+
201
+ # Show details of a single job
202
+ @app.get("/deforum_api/jobs/{id}")
203
+ async def get_job(id: str, response: Response):
204
+ jobStatus = JobStatusTracker().get(id)
205
+ if not jobStatus:
206
+ response.status_code = status.HTTP_404_NOT_FOUND
207
+ return {"id": id, "status": "NOT FOUND"}
208
+ return jobStatus
209
+
210
+ # Cancel a single job
211
+ @app.delete("/deforum_api/jobs/{id}")
212
+ async def cancel_job(id: str, response: Response):
213
+ try:
214
+ if _cancel_job(id):
215
+ return {"id": id, "message": "Job cancelled."}
216
+ else:
217
+ response.status_code = status.HTTP_400_BAD_REQUEST
218
+ return {"id": id, "message": f"Job with ID {id} not in a cancellable state. Has it already finished?"}
219
+ except FileNotFoundError as e:
220
+ response.status_code = status.HTTP_404_NOT_FOUND
221
+ return {"id": id, "message": f"Job with ID {id} not found."}
222
+
223
+ # Shared logic for job cancellation
224
+ def _cancel_job(job_id:str):
225
+ jobStatus = JobStatusTracker().get(job_id)
226
+ if not jobStatus:
227
+ raise FileNotFoundError(f"Job {job_id} not found.")
228
+
229
+ if jobStatus.status != DeforumJobStatusCategory.ACCEPTED:
230
+ # Ignore jobs in completed state (error or success)
231
+ return False
232
+
233
+ if job_id in ApiState().submitted_jobs:
234
+ # Remove job from queue
235
+ ApiState().submitted_jobs[job_id].cancel()
236
+ if jobStatus.phase != DeforumJobPhase.QUEUED and jobStatus.phase != DeforumJobPhase.DONE:
237
+ # Job must be actively running - interrupt it.
238
+ # WARNING:
239
+ # - Possible race condition: if job_id just finished after the check and another started, we'll interrupt the wrong job.
240
+ # - Not thread safe because State object is global. Will break with concurrent jobs.
241
+ state.interrupt()
242
+ JobStatusTracker().cancel_job(job_id, "Cancelled due to user request.")
243
+ return True
244
+
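+ # Metaclass that caches a single instance per class, so that ApiState() and
+ # JobStatusTracker() below always return the same shared object.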
245
+ class Singleton(type):
246
+ _instances = {}
247
+ def __call__(cls, *args, **kwargs):
248
+ if cls not in cls._instances:
249
+ cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
250
+ return cls._instances[cls]
251
+
252
+ # Maintains persistent state required by the API, e.g. the thread pool and the list of submitted jobs.
253
+ class ApiState(metaclass=Singleton):
254
+
255
+ ## Locking concurrency to 1. Concurrent generation does seem to work, but it's not clear if it's safe.
256
+ ## TODO: more experimentation required.
257
+ deforum_api_executor = ThreadPoolExecutor(max_workers=1)
258
+ submitted_jobs : Dict[str, Any] = {}
259
+
260
+ @staticmethod
261
+ def cleanup():
262
+ ApiState().deforum_api_executor.shutdown(wait=False)
263
+
264
+ def submit_job(self, batch_id: str, job_ids: List[str], deforum_settings: List[Any], opts_overrides: Dict[str, Any]):
265
+ log.debug(f"Submitting batch {batch_id} to threadpool.")
266
+ future = self.deforum_api_executor.submit(lambda: run_deforum_batch(batch_id, job_ids, deforum_settings, opts_overrides))
267
+ self.submitted_jobs[batch_id] = future
268
+
269
+ atexit.register(ApiState.cleanup)
270
+
271
+
272
+ class A1111OptionsOverrider(object):
273
+ def __init__(self, opts_overrides: Dict[str, Any]):
274
+ self.opts_overrides = opts_overrides
275
+
276
+ def __enter__(self):
277
+ if self.opts_overrides is not None and len(self.opts_overrides) > 0:
278
+ self.original_opts = {k: opts.data[k] for k in self.opts_overrides.keys() if k in opts.data}
279
+ log.debug(f"Captured options to override: {self.original_opts}")
280
+ log.info(f"Setting options: {self.opts_overrides}")
281
+ for k, v in self.opts_overrides.items():
282
+ setattr(opts, k, v)
283
+ else:
284
+ self.original_opts = None
285
+ return self
286
+
287
+ def __exit__(self, exception_type, exception_value, traceback):
288
+ if (exception_type is not None):
289
+ log.warning(f"Error during batch execution: {exception_type} - {exception_value}")
290
+ log.debug(f"{traceback}")
291
+ if (self.original_opts is not None):
292
+ log.info(f"Restoring options: {self.original_opts}")
293
+ for k, v in self.original_opts.items():
294
+ setattr(opts, k, v)
295
+
296
+
297
+ # Maintains state that tracks status of submitted jobs,
298
+ # so that clients can query job status.
299
+ class JobStatusTracker(metaclass=Singleton):
300
+ statuses: Dict[str, DeforumJobStatus] = {}
301
+ batches: Dict[str, List[str]] = {}
302
+
303
+ def accept_job(self, batch_id : str, job_id: str, deforum_settings : List[Dict[str, Any]] , options_overrides : Dict[str, Any]):
304
+ if batch_id in self.batches:
305
+ self.batches[batch_id].append(job_id)
306
+ else:
307
+ self.batches[batch_id] = [job_id]
308
+
309
+ now = datetime.now().timestamp()
310
+ self.statuses[job_id] = DeforumJobStatus(
311
+ id=job_id,
312
+ status= DeforumJobStatusCategory.ACCEPTED,
313
+ phase=DeforumJobPhase.QUEUED,
314
+ error_type=DeforumJobErrorType.NONE,
315
+ phase_progress=0.0,
316
+ started_at=now,
317
+ last_updated=now,
318
+ execution_time=0,
319
+ update_interval_time=0,
320
+ updates=0,
321
+ message=None,
322
+ outdir=None,
323
+ timestring=None,
324
+ deforum_settings=deforum_settings,
325
+ options_overrides=options_overrides,
326
+ )
327
+
328
+ def update_phase(self, job_id: str, phase: DeforumJobPhase, progress: float = 0):
329
+ if job_id in self.statuses:
330
+ current_status = self.statuses[job_id]
331
+ now = datetime.now().timestamp()
332
+ new_status = replace(
333
+ current_status,
334
+ phase=phase,
335
+ phase_progress=progress,
336
+ last_updated=now,
337
+ execution_time=now-current_status.started_at,
338
+ update_interval_time=now-current_status.last_updated,
339
+ updates=current_status.updates+1
340
+ )
341
+ self.statuses[job_id] = new_status
342
+
343
+ def update_output_info(self, job_id: str, outdir: str, timestring: str):
344
+ if job_id in self.statuses:
345
+ current_status = self.statuses[job_id]
346
+ now = datetime.now().timestamp()
347
+ new_status = replace(
348
+ current_status,
349
+ outdir=outdir,
350
+ timestring=timestring,
351
+ last_updated=now,
352
+ execution_time=now-current_status.started_at,
353
+ update_interval_time=now-current_status.last_updated,
354
+ updates=current_status.updates+1
355
+ )
356
+ self.statuses[job_id] = new_status
357
+
358
+ def complete_job(self, job_id: str):
359
+ if job_id in self.statuses:
360
+ current_status = self.statuses[job_id]
361
+ now = datetime.now().timestamp()
362
+ new_status = replace(
363
+ current_status,
364
+ status=DeforumJobStatusCategory.SUCCEEDED,
365
+ phase=DeforumJobPhase.DONE,
366
+ phase_progress=1.0,
367
+ last_updated=now,
368
+ execution_time=now-current_status.started_at,
369
+ update_interval_time=now-current_status.last_updated,
370
+ updates=current_status.updates+1
371
+ )
372
+ self.statuses[job_id] = new_status
373
+
374
+ def fail_job(self, job_id: str, error_type: str, message: str):
375
+ if job_id in self.statuses:
376
+ current_status = self.statuses[job_id]
377
+ now = datetime.now().timestamp()
378
+ new_status = replace(
379
+ current_status,
380
+ status=DeforumJobStatusCategory.FAILED,
381
+ error_type=error_type,
382
+ message=message,
383
+ last_updated=now,
384
+ execution_time=now-current_status.started_at,
385
+ update_interval_time=now-current_status.last_updated,
386
+ updates=current_status.updates+1
387
+ )
388
+ self.statuses[job_id] = new_status
389
+
390
+ def cancel_job(self, job_id: str, message: str):
391
+ if job_id in self.statuses:
392
+ current_status = self.statuses[job_id]
393
+ now = datetime.now().timestamp()
394
+ new_status = replace(
395
+ current_status,
396
+ status=DeforumJobStatusCategory.CANCELLED,
397
+ message=message,
398
+ last_updated=now,
399
+ execution_time=now-current_status.started_at,
400
+ update_interval_time=now-current_status.last_updated,
401
+ updates=current_status.updates+1
402
+ )
403
+ self.statuses[job_id] = new_status
404
+
405
+
406
+ def get(self, job_id:str):
407
+ return self.statuses[job_id] if job_id in self.statuses else None
408
+
409
+ def deforum_init_batch(_: gr.Blocks, app: FastAPI):
410
+ deforum_sys_extend()
411
+ settings_files = [open(filename, 'r') for filename in cmd_opts.deforum_run_now.split(",")]
412
+ [batch_id, job_ids] = make_ids(len(settings_files))
413
+ log.info(f"Starting init batch {batch_id} with job(s) {job_ids}...")
414
+
415
+ run_deforum_batch(batch_id, job_ids, settings_files, None)
416
+
417
+ if cmd_opts.deforum_terminate_after_run_now:
418
+ import os
419
+ os._exit(0)
420
+
421
+ # A simplified, but safe, version of Deforum's API
422
+ def deforum_simple_api(_: gr.Blocks, app: FastAPI):
423
+ deforum_sys_extend()
424
+ from fastapi.exceptions import RequestValidationError
425
+ from fastapi.responses import JSONResponse
426
+ from fastapi import FastAPI, Query, Request, UploadFile
427
+ from fastapi.encoders import jsonable_encoder
428
+ from deforum_helpers.general_utils import get_deforum_version
429
+ import uuid, pathlib
430
+
431
+ @app.exception_handler(RequestValidationError)
432
+ async def validation_exception_handler(request: Request, exc: RequestValidationError):
433
+ return JSONResponse(
434
+ status_code=422,
435
+ content=jsonable_encoder({"detail": exc.errors(), "body": exc.body}),
436
+ )
437
+
438
+ @app.get("/deforum/api_version")
439
+ async def deforum_api_version():
440
+ return JSONResponse(content={"version": '1.0'})
441
+
442
+ @app.get("/deforum/version")
443
+ async def deforum_version():
444
+ return JSONResponse(content={"version": get_deforum_version()})
445
+
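+ # Run a single generation using the bundled default_settings.txt as a base; only the keys
+ # listed in the semicolon-delimited 'allowed_params' are copied over from 'settings_json'.
+ # Example call (a sketch; both values are passed as query parameters):
+ #   POST /deforum/run?allowed_params=max_frames;prompts&settings_json={"max_frames": 10}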
446
+ @app.post("/deforum/run")
447
+ async def deforum_run(settings_json:str, allowed_params:str = ""):
448
+ try:
449
+ allowed_params = allowed_params.split(';')
450
+ deforum_settings = json.loads(settings_json)
451
+ with open(os.path.join(pathlib.Path(__file__).parent.absolute(), 'default_settings.txt'), 'r', encoding='utf-8') as f:
452
+ default_settings = json.loads(f.read())
453
+ for k, _ in default_settings.items():
454
+ if k in deforum_settings and k in allowed_params:
455
+ default_settings[k] = deforum_settings[k]
456
+ deforum_settings = default_settings
457
+ run_id = uuid.uuid4().hex
458
+ deforum_settings['batch_name'] = run_id
459
+ deforum_settings = json.dumps(deforum_settings, indent=4, ensure_ascii=False)
460
+ settings_file = f"{run_id}.txt"
461
+ with open(settings_file, 'w', encoding='utf-8') as f:
462
+ f.write(deforum_settings)
463
+ class SettingsWrapper:
464
+ def __init__(self, filename):
465
+ self.name = filename
466
+ [batch_id, job_ids] = make_ids(1)
467
+ outdir = os.path.join(os.getcwd(), opts.outdir_samples or opts.outdir_img2img_samples, str(run_id))
468
+ run_deforum_batch(batch_id, job_ids, [SettingsWrapper(settings_file)], None)
469
+ return JSONResponse(content={"outdir": outdir})
470
+ except Exception as e:
471
+ print(e)
472
+ traceback.print_exc()
473
+ return JSONResponse(status_code=500, content={"detail": "An error occurred while processing the video."},)
474
+
475
+ # Setup A1111 initialisation hooks
476
+ try:
477
+ import modules.script_callbacks as script_callbacks
478
+ if cmd_opts.deforum_api:
479
+ script_callbacks.on_app_started(deforum_api)
480
+ if cmd_opts.deforum_simple_api:
481
+ script_callbacks.on_app_started(deforum_simple_api)
482
+ if cmd_opts.deforum_run_now:
483
+ script_callbacks.on_app_started(deforum_init_batch)
484
+ except:
485
+ pass
extensions-builtin/sd-webui-deforum/scripts/deforum_api_models.py ADDED
@@ -0,0 +1,60 @@
1
+ # Copyright (C) 2023 Deforum LLC
2
+ #
3
+ # This program is free software: you can redistribute it and/or modify
4
+ # it under the terms of the GNU Affero General Public License as published by
5
+ # the Free Software Foundation, version 3 of the License.
6
+ #
7
+ # This program is distributed in the hope that it will be useful,
8
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
9
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10
+ # GNU General Public License for more details.
11
+ #
12
+ # You should have received a copy of the GNU Affero General Public License
13
+ # along with this program. If not, see <https://www.gnu.org/licenses/>.
14
+
15
+ # Contact the authors: https://deforum.github.io/
16
+
17
+ from pydantic import BaseModel
18
+ from typing import Any, Dict, List, Optional, Union
19
+ from dataclasses import dataclass
20
+ from enum import Enum
21
+
22
+ class Batch(BaseModel):
23
+ deforum_settings : Optional[Union[Dict[str, Any],List[Dict[str, Any]]]]
24
+ options_overrides : Optional[Dict[str, Any]]
25
+
26
+ class DeforumJobStatusCategory(str, Enum):
27
+ ACCEPTED = "ACCEPTED"
28
+ SUCCEEDED = "SUCCEEDED"
29
+ FAILED = "FAILED"
30
+ CANCELLED = "CANCELLED"
31
+
32
+ class DeforumJobPhase(str, Enum):
33
+ QUEUED = "QUEUED"
34
+ PREPARING = "PREPARING"
35
+ GENERATING = "GENERATING"
36
+ POST_PROCESSING = "POST_PROCESSING"
37
+ DONE = "DONE"
38
+
39
+ class DeforumJobErrorType(str, Enum):
40
+ NONE = "NONE"
41
+ RETRYABLE = "RETRYABLE"
42
+ TERMINAL = "TERMINAL"
43
+
44
+ @dataclass(frozen=True)
45
+ class DeforumJobStatus(BaseModel):
46
+ id: str
47
+ status : DeforumJobStatusCategory
48
+ phase : DeforumJobPhase
49
+ error_type : DeforumJobErrorType
50
+ phase_progress : float
51
+ started_at: float
52
+ last_updated: float
53
+ execution_time: float # time between job start and the last status update
54
+ update_interval_time: float # time between the last two status updates
55
+ updates: int # number of status updates so far
56
+ message: Optional[str]
57
+ outdir: Optional[str]
58
+ timestring: Optional[str]
59
+ deforum_settings : Optional[List[Dict[str, Any]]]
60
+ options_overrides : Optional[Dict[str, Any]]
extensions-builtin/sd-webui-deforum/scripts/deforum_extend_paths.py ADDED
@@ -0,0 +1,33 @@
1
+ # Copyright (C) 2023 Deforum LLC
2
+ #
3
+ # This program is free software: you can redistribute it and/or modify
4
+ # it under the terms of the GNU Affero General Public License as published by
5
+ # the Free Software Foundation, version 3 of the License.
6
+ #
7
+ # This program is distributed in the hope that it will be useful,
8
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
9
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10
+ # GNU General Public License for more details.
11
+ #
12
+ # You should have received a copy of the GNU Affero General Public License
13
+ # along with this program. If not, see <https://www.gnu.org/licenses/>.
14
+
15
+ # Contact the authors: https://deforum.github.io/
16
+
17
+ import os
18
+ import sys
19
+
20
+ def deforum_sys_extend():
21
+ deforum_folder_name = os.path.sep.join(os.path.abspath(__file__).split(os.path.sep)[:-2])
22
+
23
+ basedirs = [os.getcwd()]
24
+ if 'google.colab' in sys.modules:
25
+ basedirs.append('/content/gdrive/MyDrive/sd/stable-diffusion-webui') # for TheLastBen's colab
26
+ for _ in basedirs:
27
+ deforum_paths_to_ensure = [
28
+ os.path.join(deforum_folder_name, 'scripts'),
29
+ os.path.join(deforum_folder_name, 'scripts', 'deforum_helpers', 'src')
30
+ ]
31
+ for deforum_scripts_path_fix in deforum_paths_to_ensure:
32
+ if deforum_scripts_path_fix not in sys.path:
33
+ sys.path.extend([deforum_scripts_path_fix])
extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/114763196.jpg ADDED

Git LFS Details

  • SHA256: b06270a819babd08b5ec9d06a539781979f31c3ccb47d9e3d56621e804bd670f
  • Pointer size: 130 Bytes
  • Size of remote file: 24.7 kB
extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/RAFT.py ADDED
@@ -0,0 +1,44 @@
1
+ # Copyright (C) 2023 Deforum LLC
2
+ #
3
+ # This program is free software: you can redistribute it and/or modify
4
+ # it under the terms of the GNU Affero General Public License as published by
5
+ # the Free Software Foundation, version 3 of the License.
6
+ #
7
+ # This program is distributed in the hope that it will be useful,
8
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
9
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10
+ # GNU General Public License for more details.
11
+ #
12
+ # You should have received a copy of the GNU Affero General Public License
13
+ # along with this program. If not, see <https://www.gnu.org/licenses/>.
14
+
15
+ # Contact the authors: https://deforum.github.io/
16
+
17
+ import torch
18
+ import numpy as np
19
+ import torchvision.transforms.functional as F
20
+ from torchvision.models.optical_flow import Raft_Large_Weights, raft_large
21
+
22
+ class RAFT:
23
+ def __init__(self):
24
+ weights = Raft_Large_Weights.DEFAULT
25
+ self.transforms = weights.transforms()
26
+ self.device = "cuda" if torch.cuda.is_available() else "cpu"
27
+ self.model = raft_large(weights=weights, progress=False).to(self.device).eval()
28
+
29
+ def predict(self, image1, image2, num_flow_updates:int = 50):
30
+ img1 = F.to_tensor(image1)
31
+ img2 = F.to_tensor(image2)
32
+ img1_batch, img2_batch = img1.unsqueeze(0), img2.unsqueeze(0)
33
+ img1_batch, img2_batch = self.transforms(img1_batch, img2_batch)
34
+
35
+ with torch.no_grad():
36
+ flow = self.model(image1=img1_batch.to(self.device), image2=img2_batch.to(self.device), num_flow_updates=num_flow_updates)[-1].cpu().numpy()[0]
37
+
38
+ # transpose the flow array from (2, h, w) to (h, w, 2) so it's compatible with the rest of CV2's flow methods
39
+ flow = np.transpose(flow, (1, 2, 0))
40
+
41
+ return flow
42
+
43
+ def delete_model(self):
44
+ del self.model
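+
+ # Usage sketch (assumes two same-sized RGB images, e.g. PIL Images or uint8 numpy arrays):
+ #   raft = RAFT()
+ #   flow = raft.predict(img1, img2)  # -> numpy array of shape (h, w, 2)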
extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/animation.py ADDED
@@ -0,0 +1,429 @@
1
+ # Copyright (C) 2023 Deforum LLC
2
+ #
3
+ # This program is free software: you can redistribute it and/or modify
4
+ # it under the terms of the GNU Affero General Public License as published by
5
+ # the Free Software Foundation, version 3 of the License.
6
+ #
7
+ # This program is distributed in the hope that it will be useful,
8
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
9
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10
+ # GNU General Public License for more details.
11
+ #
12
+ # You should have received a copy of the GNU Affero General Public License
13
+ # along with this program. If not, see <https://www.gnu.org/licenses/>.
14
+
15
+ # Contact the authors: https://deforum.github.io/
16
+
17
+ import numpy as np
18
+ import cv2
19
+ import py3d_tools as p3d # this is actually a file in our /src folder!
20
+ from functools import reduce
21
+ import math
22
+ import torch
23
+ from einops import rearrange
24
+ from modules.shared import state, opts
25
+ from .prompt import check_is_number
26
+ from .general_utils import debug_print
27
+
28
+ def sample_from_cv2(sample: np.ndarray) -> torch.Tensor:
29
+ sample = ((sample.astype(float) / 255.0) * 2) - 1
30
+ sample = sample[None].transpose(0, 3, 1, 2).astype(np.float16)
31
+ sample = torch.from_numpy(sample)
32
+ return sample
33
+
34
+ def sample_to_cv2(sample: torch.Tensor, type=np.uint8) -> np.ndarray:
35
+ sample_f32 = rearrange(sample.squeeze().cpu().numpy(), "c h w -> h w c").astype(np.float32)
36
+ sample_f32 = ((sample_f32 * 0.5) + 0.5).clip(0, 1)
37
+ sample_int8 = (sample_f32 * 255)
38
+ return sample_int8.astype(type)
39
+
40
+ def construct_RotationMatrixHomogenous(rotation_angles):
41
+ assert(type(rotation_angles)==list and len(rotation_angles)==3)
42
+ RH = np.eye(4,4)
43
+ cv2.Rodrigues(np.array(rotation_angles), RH[0:3, 0:3])
44
+ return RH
45
+
46
+ # https://en.wikipedia.org/wiki/Rotation_matrix
47
+ def getRotationMatrixManual(rotation_angles):
48
+
49
+ rotation_angles = [np.deg2rad(x) for x in rotation_angles]
50
+
51
+ phi = rotation_angles[0] # around x
52
+ gamma = rotation_angles[1] # around y
53
+ theta = rotation_angles[2] # around z
54
+
55
+ # X rotation
56
+ Rphi = np.eye(4,4)
57
+ sp = np.sin(phi)
58
+ cp = np.cos(phi)
59
+ Rphi[1,1] = cp
60
+ Rphi[2,2] = Rphi[1,1]
61
+ Rphi[1,2] = -sp
62
+ Rphi[2,1] = sp
63
+
64
+ # Y rotation
65
+ Rgamma = np.eye(4,4)
66
+ sg = np.sin(gamma)
67
+ cg = np.cos(gamma)
68
+ Rgamma[0,0] = cg
69
+ Rgamma[2,2] = Rgamma[0,0]
70
+ Rgamma[0,2] = sg
71
+ Rgamma[2,0] = -sg
72
+
73
+ # Z rotation (in-image-plane)
74
+ Rtheta = np.eye(4,4)
75
+ st = np.sin(theta)
76
+ ct = np.cos(theta)
77
+ Rtheta[0,0] = ct
78
+ Rtheta[1,1] = Rtheta[0,0]
79
+ Rtheta[0,1] = -st
80
+ Rtheta[1,0] = st
81
+
82
+ R = reduce(lambda x,y : np.matmul(x,y), [Rphi, Rgamma, Rtheta])
83
+
84
+ return R
85
+
86
+ def getPoints_for_PerspectiveTranformEstimation(ptsIn, ptsOut, W, H, sidelength):
87
+
88
+ ptsIn2D = ptsIn[0,:]
89
+ ptsOut2D = ptsOut[0,:]
90
+ ptsOut2Dlist = []
91
+ ptsIn2Dlist = []
92
+
93
+ for i in range(0,4):
94
+ ptsOut2Dlist.append([ptsOut2D[i,0], ptsOut2D[i,1]])
95
+ ptsIn2Dlist.append([ptsIn2D[i,0], ptsIn2D[i,1]])
96
+
97
+ pin = np.array(ptsIn2Dlist) + [W/2.,H/2.]
98
+ pout = (np.array(ptsOut2Dlist) + [1.,1.]) * (0.5*sidelength)
99
+ pin = pin.astype(np.float32)
100
+ pout = pout.astype(np.float32)
101
+
102
+ return pin, pout
103
+
104
+
105
+ def warpMatrix(W, H, theta, phi, gamma, scale, fV):
106
+
107
+ # M is to be estimated
108
+ M = np.eye(4, 4)
109
+
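+ # Geometry implied by the formulas below: the image diagonal d serves as the plane's extent;
+ # the virtual camera sits at distance h so the plane spans the field of view fV, and the
+ # near/far planes bracket the image plane at h -/+ d/2.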
110
+ fVhalf = np.deg2rad(fV/2.)
111
+ d = np.sqrt(W*W+H*H)
112
+ sideLength = scale*d/np.cos(fVhalf)
113
+ h = d/(2.0*np.sin(fVhalf))
114
+ n = h-(d/2.0)
115
+ f = h+(d/2.0)
116
+
117
+ # Translation along Z-axis by -h
118
+ T = np.eye(4,4)
119
+ T[2,3] = -h
120
+
121
+ # Rotation matrices around x,y,z
122
+ R = getRotationMatrixManual([phi, gamma, theta])
123
+
124
+
125
+ # Projection Matrix
126
+ P = np.eye(4,4)
127
+ P[0,0] = 1.0/np.tan(fVhalf)
128
+ P[1,1] = P[0,0]
129
+ P[2,2] = -(f+n)/(f-n)
130
+ P[2,3] = -(2.0*f*n)/(f-n)
131
+ P[3,2] = -1.0
132
+
133
+ # pythonic matrix multiplication
134
+ F = reduce(lambda x,y : np.matmul(x,y), [P, T, R])
135
+
136
+ # shape should be 1,4,3 for ptsIn and ptsOut since perspectiveTransform() expects data in this way.
137
+ # In C++, this can be achieved by Mat ptsIn(1,4,CV_64FC3);
138
+ ptsIn = np.array([[
139
+ [-W/2., H/2., 0.],[ W/2., H/2., 0.],[ W/2.,-H/2., 0.],[-W/2.,-H/2., 0.]
140
+ ]])
141
+ ptsOut = np.array(np.zeros((ptsIn.shape), dtype=ptsIn.dtype))
142
+ ptsOut = cv2.perspectiveTransform(ptsIn, F)
143
+
144
+ ptsInPt2f, ptsOutPt2f = getPoints_for_PerspectiveTranformEstimation(ptsIn, ptsOut, W, H, sideLength)
145
+
146
+ # check float32 otherwise OpenCV throws an error
147
+ assert(ptsInPt2f.dtype == np.float32)
148
+ assert(ptsOutPt2f.dtype == np.float32)
149
+ M33 = cv2.getPerspectiveTransform(ptsInPt2f,ptsOutPt2f)
150
+
151
+ return M33, sideLength
152
+
153
+ def get_flip_perspective_matrix(W, H, keys, frame_idx):
154
+ perspective_flip_theta = keys.perspective_flip_theta_series[frame_idx]
155
+ perspective_flip_phi = keys.perspective_flip_phi_series[frame_idx]
156
+ perspective_flip_gamma = keys.perspective_flip_gamma_series[frame_idx]
157
+ perspective_flip_fv = keys.perspective_flip_fv_series[frame_idx]
158
+ M, sl = warpMatrix(W, H, perspective_flip_theta, perspective_flip_phi, perspective_flip_gamma, 1., perspective_flip_fv)
159
+ post_trans_mat = np.float32([[1, 0, (W-sl)/2], [0, 1, (H-sl)/2]])
160
+ post_trans_mat = np.vstack([post_trans_mat, [0,0,1]])
161
+ bM = np.matmul(M, post_trans_mat)
162
+ return bM
163
+
164
+ def flip_3d_perspective(anim_args, prev_img_cv2, keys, frame_idx):
165
+ W, H = (prev_img_cv2.shape[1], prev_img_cv2.shape[0])
166
+ return cv2.warpPerspective(
167
+ prev_img_cv2,
168
+ get_flip_perspective_matrix(W, H, keys, frame_idx),
169
+ (W, H),
170
+ borderMode=cv2.BORDER_WRAP if anim_args.border == 'wrap' else cv2.BORDER_REPLICATE
171
+ )
172
+
173
+ def anim_frame_warp(prev_img_cv2, args, anim_args, keys, frame_idx, depth_model=None, depth=None, device='cuda', half_precision = False):
174
+
175
+ if anim_args.use_depth_warping:
176
+ if depth is None and depth_model is not None:
177
+ depth = depth_model.predict(prev_img_cv2, anim_args.midas_weight, half_precision)
178
+
179
+ else:
180
+ depth = None
181
+
182
+ if anim_args.animation_mode == '2D':
183
+ prev_img = anim_frame_warp_2d(prev_img_cv2, args, anim_args, keys, frame_idx)
184
+ else: # '3D'
185
+ prev_img = anim_frame_warp_3d(device, prev_img_cv2, depth, anim_args, keys, frame_idx)
186
+
187
+ return prev_img, depth
188
+
189
+ def anim_frame_warp_2d(prev_img_cv2, args, anim_args, keys, frame_idx):
190
+ angle = keys.angle_series[frame_idx]
191
+ zoom = keys.zoom_series[frame_idx]
192
+ translation_x = keys.translation_x_series[frame_idx]
193
+ translation_y = keys.translation_y_series[frame_idx]
194
+ transform_center_x = keys.transform_center_x_series[frame_idx]
195
+ transform_center_y = keys.transform_center_y_series[frame_idx]
196
+ center_point = (args.W * transform_center_x, args.H * transform_center_y)
197
+ rot_mat = cv2.getRotationMatrix2D(center_point, angle, zoom)
198
+ trans_mat = np.float32([[1, 0, translation_x], [0, 1, translation_y]])
199
+ trans_mat = np.vstack([trans_mat, [0,0,1]])
200
+ rot_mat = np.vstack([rot_mat, [0,0,1]])
201
+ if anim_args.enable_perspective_flip:
202
+ bM = get_flip_perspective_matrix(args.W, args.H, keys, frame_idx)
203
+ rot_mat = np.matmul(np.matmul(bM, rot_mat), trans_mat)
204
+ else:
205
+ rot_mat = np.matmul(rot_mat, trans_mat)
206
+ return cv2.warpPerspective(
207
+ prev_img_cv2,
208
+ rot_mat,
209
+ (prev_img_cv2.shape[1], prev_img_cv2.shape[0]),
210
+ borderMode=cv2.BORDER_WRAP if anim_args.border == 'wrap' else cv2.BORDER_REPLICATE
211
+ )
212
+
213
+ def anim_frame_warp_3d(device, prev_img_cv2, depth, anim_args, keys, frame_idx):
214
+ TRANSLATION_SCALE = 1.0/200.0 # matches Disco
215
+ translate_xyz = [
216
+ -keys.translation_x_series[frame_idx] * TRANSLATION_SCALE,
217
+ keys.translation_y_series[frame_idx] * TRANSLATION_SCALE,
218
+ -keys.translation_z_series[frame_idx] * TRANSLATION_SCALE
219
+ ]
220
+ rotate_xyz = [
221
+ math.radians(keys.rotation_3d_x_series[frame_idx]),
222
+ math.radians(keys.rotation_3d_y_series[frame_idx]),
223
+ math.radians(keys.rotation_3d_z_series[frame_idx])
224
+ ]
225
+ if anim_args.enable_perspective_flip:
226
+ prev_img_cv2 = flip_3d_perspective(anim_args, prev_img_cv2, keys, frame_idx)
227
+ rot_mat = p3d.euler_angles_to_matrix(torch.tensor(rotate_xyz, device=device), "XYZ").unsqueeze(0)
228
+ result = transform_image_3d_switcher(device if not device.type.startswith('mps') else torch.device('cpu'), prev_img_cv2, depth, rot_mat, translate_xyz, anim_args, keys, frame_idx)
229
+ torch.cuda.empty_cache()
230
+ return result
231
+
232
+ def transform_image_3d_switcher(device, prev_img_cv2, depth_tensor, rot_mat, translate, anim_args, keys, frame_idx):
233
+ if anim_args.depth_algorithm.lower() in ['midas+adabins (old)', 'zoe+adabins (old)']:
234
+ return transform_image_3d_legacy(device, prev_img_cv2, depth_tensor, rot_mat, translate, anim_args, keys, frame_idx)
235
+ else:
236
+ return transform_image_3d_new(device, prev_img_cv2, depth_tensor, rot_mat, translate, anim_args, keys, frame_idx)
237
+
238
+ def transform_image_3d_legacy(device, prev_img_cv2, depth_tensor, rot_mat, translate, anim_args, keys, frame_idx):
239
+ # adapted and optimized version of transform_image_3d from Disco Diffusion https://github.com/alembics/disco-diffusion
240
+ w, h = prev_img_cv2.shape[1], prev_img_cv2.shape[0]
241
+
242
+ if anim_args.aspect_ratio_use_old_formula:
243
+ aspect_ratio = float(w)/float(h)
244
+ else:
245
+ aspect_ratio = keys.aspect_ratio_series[frame_idx]
246
+
247
+ near = keys.near_series[frame_idx]
248
+ far = keys.far_series[frame_idx]
249
+ fov_deg = keys.fov_series[frame_idx]
250
+ persp_cam_old = p3d.FoVPerspectiveCameras(near, far, aspect_ratio, fov=fov_deg, degrees=True, device=device)
251
+ persp_cam_new = p3d.FoVPerspectiveCameras(near, far, aspect_ratio, fov=fov_deg, degrees=True, R=rot_mat, T=torch.tensor([translate]), device=device)
252
+
253
+ # range of [-1,1] is important to torch grid_sample's padding handling
254
+ y,x = torch.meshgrid(torch.linspace(-1.,1.,h,dtype=torch.float32,device=device),torch.linspace(-1.,1.,w,dtype=torch.float32,device=device))
255
+ if depth_tensor is None:
256
+ z = torch.ones_like(x)
257
+ else:
258
+ z = torch.as_tensor(depth_tensor, dtype=torch.float32, device=device)
259
+ xyz_old_world = torch.stack((x.flatten(), y.flatten(), z.flatten()), dim=1)
260
+
261
+ xyz_old_cam_xy = persp_cam_old.get_full_projection_transform().transform_points(xyz_old_world)[:,0:2]
262
+ xyz_new_cam_xy = persp_cam_new.get_full_projection_transform().transform_points(xyz_old_world)[:,0:2]
263
+
264
+ offset_xy = xyz_new_cam_xy - xyz_old_cam_xy
265
+ # affine_grid theta param expects a batch of 2D mats. Each is 2x3 to do rotation+translation.
266
+ identity_2d_batch = torch.tensor([[1.,0.,0.],[0.,1.,0.]], device=device).unsqueeze(0)
267
+ # coords_2d will have shape (N,H,W,2).. which is also what grid_sample needs.
268
+ coords_2d = torch.nn.functional.affine_grid(identity_2d_batch, [1,1,h,w], align_corners=False)
269
+ offset_coords_2d = coords_2d - torch.reshape(offset_xy, (h,w,2)).unsqueeze(0)
270
+
271
+ image_tensor = rearrange(torch.from_numpy(prev_img_cv2.astype(np.float32)), 'h w c -> c h w').to(device)
272
+ new_image = torch.nn.functional.grid_sample(
273
+ image_tensor.add(1/512 - 0.0001).unsqueeze(0),
274
+ offset_coords_2d,
275
+ mode=anim_args.sampling_mode,
276
+ padding_mode=anim_args.padding_mode,
277
+ align_corners=False
278
+ )
279
+
280
+ # convert back to cv2 style numpy array
281
+ result = rearrange(
282
+ new_image.squeeze().clamp(0,255),
283
+ 'c h w -> h w c'
284
+ ).cpu().numpy().astype(prev_img_cv2.dtype)
285
+ return result
286
+
287
+ def transform_image_3d_new(device, prev_img_cv2, depth_tensor, rot_mat, translate, anim_args, keys, frame_idx):
288
+ '''
289
+ originally an adapted and optimized version of transform_image_3d from Disco Diffusion https://github.com/alembics/disco-diffusion
290
+ modified by reallybigname to control various incoming tensors
291
+ '''
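+ # Per-algorithm (depth, depth_factor, depth_offset) values map the normalized depth into the
+ # warp's z range. MiDaS predicts inverse relative depth, hence the negated factor and shifted
+ # offset compared to the metric-depth models below.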
292
+ if anim_args.depth_algorithm.lower().startswith('midas'): # 'Midas-3-Hybrid' or 'Midas-3.1-BeitLarge'
293
+ depth = 1
294
+ depth_factor = -1
295
+ depth_offset = -2
296
+ elif anim_args.depth_algorithm.lower() == "adabins":
297
+ depth = 1
298
+ depth_factor = 1
299
+ depth_offset = 1
300
+ elif anim_args.depth_algorithm.lower() == "leres":
301
+ depth = 1
302
+ depth_factor = 1
303
+ depth_offset = 1
304
+ elif anim_args.depth_algorithm.lower() == "zoe":
305
+ depth = 1
306
+ depth_factor = 1
307
+ depth_offset = 1
308
+ else:
309
+ raise Exception(f"Unknown depth_algorithm passed to transform_image_3d function: {anim_args.depth_algorithm}")
310
+
311
+ w, h = prev_img_cv2.shape[1], prev_img_cv2.shape[0]
312
+
313
+ # depth stretching aspect ratio (has nothing to do with image dimensions - which is why the old formula was flawed)
314
+ aspect_ratio = float(w)/float(h) if anim_args.aspect_ratio_use_old_formula else keys.aspect_ratio_series[frame_idx]
315
+
316
+ # get projection keys
317
+ near = keys.near_series[frame_idx]
318
+ far = keys.far_series[frame_idx]
319
+ fov_deg = keys.fov_series[frame_idx]
320
+
321
+ # get perspective cams old (still) and new (transformed)
322
+ persp_cam_old = p3d.FoVPerspectiveCameras(near, far, aspect_ratio, fov=fov_deg, degrees=True, device=device)
323
+ persp_cam_new = p3d.FoVPerspectiveCameras(near, far, aspect_ratio, fov=fov_deg, degrees=True, R=rot_mat, T=torch.tensor([translate]), device=device)
324
+
325
+ # make xy meshgrid - range of [-1,1] is important to torch grid_sample's padding handling
326
+ y,x = torch.meshgrid(torch.linspace(-1.,1.,h,dtype=torch.float32,device=device),torch.linspace(-1.,1.,w,dtype=torch.float32,device=device))
327
+
328
+ # test tensor for validity (some are corrupted for some reason)
329
+ depth_tensor_invalid = depth_tensor is None or torch.isnan(depth_tensor).any() or torch.isinf(depth_tensor).any() or depth_tensor.min() == depth_tensor.max()
330
+
331
+ if depth_tensor is not None:
332
+ debug_print(f"Depth_T.min: {depth_tensor.min()}, Depth_T.max: {depth_tensor.max()}")
333
+ # if invalid, create flat z for this frame
334
+ if depth_tensor_invalid:
335
+ # if none, then 3D depth is turned off, so no warning is needed.
336
+ if depth_tensor is not None:
337
+ print("Depth tensor invalid. Generating a Flat depth for this frame.")
338
+ # create flat depth
339
+ z = torch.ones_like(x)
340
+ # create z from depth tensor
341
+ else:
342
+ # prepare tensor between 0 and 1 with optional equalization and autocontrast
343
+ depth_normalized = prepare_depth_tensor(depth_tensor)
344
+
345
+ # Rescale the depth values to depth with offset (depth 2 and offset -1 would be -1 to +11)
346
+ depth_final = depth_normalized * depth + depth_offset
347
+
348
+ # depth factor (1 is normal. -1 is inverted)
349
+ if depth_factor != 1:
350
+ depth_final *= depth_factor
351
+
352
+ # console reporting of depth normalization, min, max, diff
353
+ # will *only* print to console if Dev mode is enabled in general settings of Deforum
354
+ txt_depth_min, txt_depth_max = '{:.2f}'.format(float(depth_tensor.min())), '{:.2f}'.format(float(depth_tensor.max()))
355
+ diff = '{:.2f}'.format(float(depth_tensor.max()) - float(depth_tensor.min()))
356
+ console_txt = f"\033[36mDepth normalized to {depth_final.min()}/{depth_final.max()} from"
357
+ debug_print(f"{console_txt} {txt_depth_min}/{txt_depth_max} diff {diff}\033[0m")
358
+
359
+ # add z from depth
360
+ z = torch.as_tensor(depth_final, dtype=torch.float32, device=device)
361
+
362
+ # calculate offset_xy
363
+ xyz_old_world = torch.stack((x.flatten(), y.flatten(), z.flatten()), dim=1)
364
+ xyz_old_cam_xy = persp_cam_old.get_full_projection_transform().transform_points(xyz_old_world)[:,0:2]
365
+ xyz_new_cam_xy = persp_cam_new.get_full_projection_transform().transform_points(xyz_old_world)[:,0:2]
366
+ offset_xy = xyz_new_cam_xy - xyz_old_cam_xy
367
+
368
+ # affine_grid theta param expects a batch of 2D mats. Each is 2x3 to do rotation+translation.
369
+ identity_2d_batch = torch.tensor([[1.,0.,0.],[0.,1.,0.]], device=device).unsqueeze(0)
370
+
371
+ # coords_2d will have shape (N,H,W,2).. which is also what grid_sample needs.
372
+ coords_2d = torch.nn.functional.affine_grid(identity_2d_batch, [1,1,h,w], align_corners=False)
373
+ offset_coords_2d = coords_2d - torch.reshape(offset_xy, (h,w,2)).unsqueeze(0)
374
+
375
+ # do the hyperdimensional remap
376
+ image_tensor = rearrange(torch.from_numpy(prev_img_cv2.astype(np.float32)), 'h w c -> c h w').to(device)
377
+ new_image = torch.nn.functional.grid_sample(
378
+ image_tensor.unsqueeze(0), # image_tensor.add(1/512 - 0.0001).unsqueeze(0),
379
+ offset_coords_2d,
380
+ mode=anim_args.sampling_mode,
381
+ padding_mode=anim_args.padding_mode,
382
+ align_corners=False
383
+ )
384
+
385
+ # convert back to cv2 style numpy array
386
+ result = rearrange(
387
+ new_image.squeeze().clamp(0,255),
388
+ 'c h w -> h w c'
389
+ ).cpu().numpy().astype(prev_img_cv2.dtype)
390
+ return result
391
+
392
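The per-algorithm constants above define a simple affine remap of the normalized depth. For the MiDaS branch (depth=1, depth_factor=-1, depth_offset=-2), a map in [0, 1] lands in [1, 2] after offset and inversion; a quick arithmetic check:

```python
import torch

depth_normalized = torch.linspace(0., 1., 5)      # output of prepare_depth_tensor
depth_final = (depth_normalized * 1 + (-2)) * -1  # MiDaS: depth=1, offset=-2, factor=-1
print(depth_final)  # tensor([2.0000, 1.7500, 1.5000, 1.2500, 1.0000])
```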
+ def prepare_depth_tensor(depth_tensor=None):
+     # Prepares a depth tensor with normalization & equalization between 0 and 1
+     depth_range = depth_tensor.max() - depth_tensor.min()
+     depth_tensor = (depth_tensor - depth_tensor.min()) / depth_range
+     depth_tensor = depth_equalization(depth_tensor=depth_tensor)
+     return depth_tensor
+
+ def depth_equalization(depth_tensor):
+     """
+     Perform histogram equalization on a single-channel depth tensor.
+
+     Args:
+         depth_tensor (torch.Tensor): A 2D depth tensor (H, W).
+
+     Returns:
+         torch.Tensor: Equalized depth tensor (2D).
+     """
+
+     # Convert the depth tensor to a NumPy array for processing
+     depth_array = depth_tensor.cpu().numpy()
+
+     # Calculate the histogram of the depth values using a specified number of bins
+     # Increase the number of bins for higher precision depth tensors
+     hist, bin_edges = np.histogram(depth_array, bins=1024, range=(0, 1))
+
+     # Calculate the cumulative distribution function (CDF) of the histogram
+     cdf = hist.cumsum()
+
+     # Normalize the CDF so that the maximum value is 1
+     cdf = cdf / float(cdf[-1])
+
+     # Perform histogram equalization by mapping the original depth values to the CDF values
+     equalized_depth_array = np.interp(depth_array, bin_edges[:-1], cdf)
+
+     # Convert the equalized depth array back to a PyTorch tensor and return it
+     equalized_depth_tensor = torch.from_numpy(equalized_depth_array).to(depth_tensor.device)
+
+     return equalized_depth_tensor
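Mapping values through their own CDF flattens the depth distribution, so camera motion responds across the whole depth range rather than bunching in one band. A small sketch of the same CDF-mapping trick on a synthetic, heavily skewed depth map (NumPy only; assumes values already normalized to [0, 1]):

```python
import numpy as np

rng = np.random.default_rng(0)
depth = rng.beta(8, 2, size=(64, 64))            # skewed toward 1.0
hist, edges = np.histogram(depth, bins=1024, range=(0, 1))
cdf = hist.cumsum() / hist.sum()                 # monotone map onto [0, 1]
equalized = np.interp(depth, edges[:-1], cdf)
print(depth.mean().round(3), equalized.mean().round(3))  # mean moves toward ~0.5
```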
extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/animation_key_frames.py ADDED
@@ -0,0 +1,166 @@
+ # Copyright (C) 2023 Deforum LLC
+ #
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU Affero General Public License as published by
+ # the Free Software Foundation, version 3 of the License.
+ #
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ # GNU General Public License for more details.
+ #
+ # You should have received a copy of the GNU Affero General Public License
+ # along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+ # Contact the authors: https://deforum.github.io/
+
+ import re
+ import numpy as np
+ import numexpr
+ import pandas as pd
+ from .prompt import check_is_number
+ from modules import scripts, shared
+
+ class DeformAnimKeys():
+     def __init__(self, anim_args, seed=-1):
+         self.fi = FrameInterpolater(anim_args.max_frames, seed)
+         self.angle_series = self.fi.parse_inbetweens(anim_args.angle, 'angle')
+         self.transform_center_x_series = self.fi.parse_inbetweens(anim_args.transform_center_x, 'transform_center_x')
+         self.transform_center_y_series = self.fi.parse_inbetweens(anim_args.transform_center_y, 'transform_center_y')
+         self.zoom_series = self.fi.parse_inbetweens(anim_args.zoom, 'zoom')
+         self.translation_x_series = self.fi.parse_inbetweens(anim_args.translation_x, 'translation_x')
+         self.translation_y_series = self.fi.parse_inbetweens(anim_args.translation_y, 'translation_y')
+         self.translation_z_series = self.fi.parse_inbetweens(anim_args.translation_z, 'translation_z')
+         self.rotation_3d_x_series = self.fi.parse_inbetweens(anim_args.rotation_3d_x, 'rotation_3d_x')
+         self.rotation_3d_y_series = self.fi.parse_inbetweens(anim_args.rotation_3d_y, 'rotation_3d_y')
+         self.rotation_3d_z_series = self.fi.parse_inbetweens(anim_args.rotation_3d_z, 'rotation_3d_z')
+         self.perspective_flip_theta_series = self.fi.parse_inbetweens(anim_args.perspective_flip_theta, 'perspective_flip_theta')
+         self.perspective_flip_phi_series = self.fi.parse_inbetweens(anim_args.perspective_flip_phi, 'perspective_flip_phi')
+         self.perspective_flip_gamma_series = self.fi.parse_inbetweens(anim_args.perspective_flip_gamma, 'perspective_flip_gamma')
+         self.perspective_flip_fv_series = self.fi.parse_inbetweens(anim_args.perspective_flip_fv, 'perspective_flip_fv')
+         self.noise_schedule_series = self.fi.parse_inbetweens(anim_args.noise_schedule, 'noise_schedule')
+         self.strength_schedule_series = self.fi.parse_inbetweens(anim_args.strength_schedule, 'strength_schedule')
+         self.contrast_schedule_series = self.fi.parse_inbetweens(anim_args.contrast_schedule, 'contrast_schedule')
+         self.cfg_scale_schedule_series = self.fi.parse_inbetweens(anim_args.cfg_scale_schedule, 'cfg_scale_schedule')
+         self.ddim_eta_schedule_series = self.fi.parse_inbetweens(anim_args.ddim_eta_schedule, 'ddim_eta_schedule')
+         self.ancestral_eta_schedule_series = self.fi.parse_inbetweens(anim_args.ancestral_eta_schedule, 'ancestral_eta_schedule')
+         self.pix2pix_img_cfg_scale_series = self.fi.parse_inbetweens(anim_args.pix2pix_img_cfg_scale_schedule, 'pix2pix_img_cfg_scale_schedule')
+         self.subseed_schedule_series = self.fi.parse_inbetweens(anim_args.subseed_schedule, 'subseed_schedule')
+         self.subseed_strength_schedule_series = self.fi.parse_inbetweens(anim_args.subseed_strength_schedule, 'subseed_strength_schedule')
+         self.checkpoint_schedule_series = self.fi.parse_inbetweens(anim_args.checkpoint_schedule, 'checkpoint_schedule', is_single_string = True)
+         self.steps_schedule_series = self.fi.parse_inbetweens(anim_args.steps_schedule, 'steps_schedule')
+         self.seed_schedule_series = self.fi.parse_inbetweens(anim_args.seed_schedule, 'seed_schedule')
+         self.sampler_schedule_series = self.fi.parse_inbetweens(anim_args.sampler_schedule, 'sampler_schedule', is_single_string = True)
+         self.clipskip_schedule_series = self.fi.parse_inbetweens(anim_args.clipskip_schedule, 'clipskip_schedule')
+         self.noise_multiplier_schedule_series = self.fi.parse_inbetweens(anim_args.noise_multiplier_schedule, 'noise_multiplier_schedule')
+         self.mask_schedule_series = self.fi.parse_inbetweens(anim_args.mask_schedule, 'mask_schedule', is_single_string = True)
+         self.noise_mask_schedule_series = self.fi.parse_inbetweens(anim_args.noise_mask_schedule, 'noise_mask_schedule', is_single_string = True)
+         self.kernel_schedule_series = self.fi.parse_inbetweens(anim_args.kernel_schedule, 'kernel_schedule')
+         self.sigma_schedule_series = self.fi.parse_inbetweens(anim_args.sigma_schedule, 'sigma_schedule')
+         self.amount_schedule_series = self.fi.parse_inbetweens(anim_args.amount_schedule, 'amount_schedule')
+         self.threshold_schedule_series = self.fi.parse_inbetweens(anim_args.threshold_schedule, 'threshold_schedule')
+         self.aspect_ratio_series = self.fi.parse_inbetweens(anim_args.aspect_ratio_schedule, 'aspect_ratio_schedule')
+         self.fov_series = self.fi.parse_inbetweens(anim_args.fov_schedule, 'fov_schedule')
+         self.near_series = self.fi.parse_inbetweens(anim_args.near_schedule, 'near_schedule')
+         self.cadence_flow_factor_schedule_series = self.fi.parse_inbetweens(anim_args.cadence_flow_factor_schedule, 'cadence_flow_factor_schedule')
+         self.redo_flow_factor_schedule_series = self.fi.parse_inbetweens(anim_args.redo_flow_factor_schedule, 'redo_flow_factor_schedule')
+         self.far_series = self.fi.parse_inbetweens(anim_args.far_schedule, 'far_schedule')
+         self.hybrid_comp_alpha_schedule_series = self.fi.parse_inbetweens(anim_args.hybrid_comp_alpha_schedule, 'hybrid_comp_alpha_schedule')
+         self.hybrid_comp_mask_blend_alpha_schedule_series = self.fi.parse_inbetweens(anim_args.hybrid_comp_mask_blend_alpha_schedule, 'hybrid_comp_mask_blend_alpha_schedule')
+         self.hybrid_comp_mask_contrast_schedule_series = self.fi.parse_inbetweens(anim_args.hybrid_comp_mask_contrast_schedule, 'hybrid_comp_mask_contrast_schedule')
+         self.hybrid_comp_mask_auto_contrast_cutoff_high_schedule_series = self.fi.parse_inbetweens(anim_args.hybrid_comp_mask_auto_contrast_cutoff_high_schedule, 'hybrid_comp_mask_auto_contrast_cutoff_high_schedule')
+         self.hybrid_comp_mask_auto_contrast_cutoff_low_schedule_series = self.fi.parse_inbetweens(anim_args.hybrid_comp_mask_auto_contrast_cutoff_low_schedule, 'hybrid_comp_mask_auto_contrast_cutoff_low_schedule')
+         self.hybrid_flow_factor_schedule_series = self.fi.parse_inbetweens(anim_args.hybrid_flow_factor_schedule, 'hybrid_flow_factor_schedule')
+
+ class ControlNetKeys():
+     def __init__(self, anim_args, controlnet_args):
+         self.fi = FrameInterpolater(max_frames=anim_args.max_frames)
+         self.schedules = {}
+         max_models = shared.opts.data.get("control_net_unit_count", shared.opts.data.get("control_net_max_models_num", 5))
+         num_of_models = 5
+         num_of_models = num_of_models if max_models <= 5 else max_models
+         for i in range(1, num_of_models + 1):
+             for suffix in ['weight', 'guidance_start', 'guidance_end']:
+                 prefix = f"cn_{i}"
+                 input_key = f"{prefix}_{suffix}"
+                 output_key = f"{input_key}_schedule_series"
+                 self.schedules[output_key] = self.fi.parse_inbetweens(getattr(controlnet_args, input_key), input_key)
+                 setattr(self, output_key, self.schedules[output_key])
+
+ class LooperAnimKeys():
+     def __init__(self, loop_args, anim_args, seed):
+         self.fi = FrameInterpolater(anim_args.max_frames, seed)
+         self.use_looper = loop_args.use_looper
+         self.imagesToKeyframe = loop_args.init_images
+         self.image_strength_schedule_series = self.fi.parse_inbetweens(loop_args.image_strength_schedule, 'image_strength_schedule')
+         self.blendFactorMax_series = self.fi.parse_inbetweens(loop_args.blendFactorMax, 'blendFactorMax')
+         self.blendFactorSlope_series = self.fi.parse_inbetweens(loop_args.blendFactorSlope, 'blendFactorSlope')
+         self.tweening_frames_schedule_series = self.fi.parse_inbetweens(loop_args.tweening_frames_schedule, 'tweening_frames_schedule')
+         self.color_correction_factor_series = self.fi.parse_inbetweens(loop_args.color_correction_factor, 'color_correction_factor')
+
+ class FrameInterpolater():
+     def __init__(self, max_frames=0, seed=-1) -> None:
+         self.max_frames = max_frames
+         self.seed = seed
+
+     def parse_inbetweens(self, value, filename = 'unknown', is_single_string = False):
+         return self.get_inbetweens(self.parse_key_frames(value, filename = filename), filename = filename, is_single_string = is_single_string)
+
+     def sanitize_value(self, value):
+         return value.replace("'","").replace('"',"").replace('(',"").replace(')',"")
+
+     def get_inbetweens(self, key_frames, integer=False, interp_method='Linear', is_single_string = False, filename = 'unknown'):
+         key_frame_series = pd.Series([np.nan for a in range(self.max_frames)])
+         # get our ui variables set for numexpr.evaluate
+         max_f = self.max_frames -1
+         s = self.seed
+         for i in range(0, self.max_frames):
+             if i in key_frames:
+                 value = key_frames[i]
+                 sanitized_value = self.sanitize_value(value)
+                 value_is_number = check_is_number(sanitized_value)
+                 if value_is_number: # if it's only a number, leave the rest for the default interpolation
+                     key_frame_series[i] = sanitized_value
+             if not value_is_number:
+                 t = i
+                 # workaround for values formatted like 0:("I am test") //used for sampler schedules
+                 try:
+                     key_frame_series[i] = numexpr.evaluate(value) if not is_single_string else sanitized_value
+                 except SyntaxError as e:
+                     e.filename = f"{filename}@frame#{i}"
+                     raise e
+             elif is_single_string:# take previous string value and replicate it
+                 key_frame_series[i] = key_frame_series[i-1]
+         key_frame_series = key_frame_series.astype(float) if not is_single_string else key_frame_series # as string
+
+         if interp_method == 'Cubic' and len(key_frames.items()) <= 3:
+             interp_method = 'Quadratic'
+         if interp_method == 'Quadratic' and len(key_frames.items()) <= 2:
+             interp_method = 'Linear'
+
+         key_frame_series[0] = key_frame_series[key_frame_series.first_valid_index()]
+         key_frame_series[self.max_frames-1] = key_frame_series[key_frame_series.last_valid_index()]
+         key_frame_series = key_frame_series.interpolate(method=interp_method.lower(), limit_direction='both')
+         if integer:
+             return key_frame_series.astype(int)
+         return key_frame_series
+
+     def parse_key_frames(self, string, filename='unknown'):
+         # because math functions (i.e. sin(t)) can use brackets,
+         # the value is extracted from within its enclosing brackets,
+         # with a comma or end of line expected after the closing one
+         frames = dict()
+         for match_object in string.split(","):
+             frameParam = match_object.split(":")
+             max_f = self.max_frames -1
+             s = self.seed
+             try:
+                 frame = int(self.sanitize_value(frameParam[0])) if check_is_number(self.sanitize_value(frameParam[0].strip())) else int(numexpr.evaluate(frameParam[0].strip().replace("'","",1).replace('"',"",1)[::-1].replace("'","",1).replace('"',"",1)[::-1]))
+                 frames[frame] = frameParam[1].strip()
+             except SyntaxError as e:
+                 e.filename = filename
+                 raise e
+         if frames == {} and len(string) != 0:
+             raise RuntimeError('Key Frame string not correctly formatted')
+         return frames
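A quick sketch of what `FrameInterpolater` does with schedule strings: plain keyframes are linearly interpolated, while expressions over `t` are evaluated at every frame. This assumes a running webui/Deforum environment where the module (and its `modules.shared` dependency) is importable; the values are illustrative.

```python
from deforum_helpers.animation_key_frames import FrameInterpolater

fi = FrameInterpolater(max_frames=10, seed=42)
series = fi.parse_inbetweens('0: (0), 9: (9)', 'demo')       # plain keyframes
print(series.tolist())                                       # [0.0, 1.0, ..., 9.0]

waves = fi.parse_inbetweens('0: (2*sin(3.14*t/9))', 'demo')  # numexpr expression over t
print(round(waves[4], 2))                                    # ~1.97, near the sine peak
```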
extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/args.py ADDED
@@ -0,0 +1,1179 @@
+ # Copyright (C) 2023 Deforum LLC
+ #
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU Affero General Public License as published by
+ # the Free Software Foundation, version 3 of the License.
+ #
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ # GNU General Public License for more details.
+ #
+ # You should have received a copy of the GNU Affero General Public License
+ # along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+ # Contact the authors: https://deforum.github.io/
+
+ import json
+ import os
+ import tempfile
+ import time
+ from types import SimpleNamespace
+ import modules.paths as ph
+ import modules.shared as sh
+ from modules.processing import get_fixed_seed
+ from .defaults import get_guided_imgs_default_json, mask_fill_choices, get_samplers_list
+ from .deforum_controlnet import controlnet_component_names
+ from .general_utils import get_os, substitute_placeholders
+
+ from PIL import Image
+ import pathlib
+
+ def RootArgs():
+     return {
+         "device": sh.device,
+         "models_path": ph.models_path + '/Deforum',
+         "half_precision": not sh.cmd_opts.no_half,
+         "clipseg_model": None,
+         "mask_preset_names": ['everywhere', 'video_mask'],
+         "frames_cache": [],
+         "raw_batch_name": None,
+         "raw_seed": None,
+         "timestring": "",
+         "subseed": -1,
+         "subseed_strength": 0,
+         "seed_internal": 0,
+         "init_sample": None,
+         "noise_mask": None,
+         "initial_info": None,
+         "first_frame": None,
+         "animation_prompts": None,
+         "current_user_os": get_os(),
+         "tmp_deforum_run_duplicated_folder": os.path.join(tempfile.gettempdir(), 'tmp_run_deforum')
+     }
+
+ # 'Midas-3.1-BeitLarge' is temporarily removed until fixed. Can add it back anytime as it's supported in the back-end depth code
+ def DeforumAnimArgs():
+     return {
+         "animation_mode": {
+             "label": "Animation mode",
+             "type": "radio",
+             "choices": ['2D', '3D', 'Video Input', 'Interpolation'],
+             "value": "2D",
+             "info": "control animation mode, will hide non relevant params upon change"
+         },
+         "max_frames": {
+             "label": "Max frames",
+             "type": "number",
+             "precision": 0,
+             "value": 120,
+             "info": "end the animation at this frame number",
+         },
+         "border": {
+             "label": "Border mode",
+             "type": "radio",
+             "choices": ['replicate', 'wrap'],
+             "value": "replicate",
+             "info": "controls pixel generation method for images smaller than the frame. hover on the options to see more info"
+         },
+         "angle": {
+             "label": "Angle",
+             "type": "textbox",
+             "value": "0: (0)",
+             "info": "rotate canvas clockwise/anticlockwise in degrees per frame"
+         },
+         "zoom": {
+             "label": "Zoom",
+             "type": "textbox",
+             "value": "0: (1.0025+0.002*sin(1.25*3.14*t/30))",
+             "info": "scale the canvas size, multiplicatively. [static = 1.0]"
+         },
+         "translation_x": {
+             "label": "Translation X",
+             "type": "textbox",
+             "value": "0: (0)",
+             "info": "move canvas left/right in pixels per frame"
+         },
+         "translation_y": {
+             "label": "Translation Y",
+             "type": "textbox",
+             "value": "0: (0)",
+             "info": "move canvas up/down in pixels per frame"
+         },
+         "translation_z": {
+             "label": "Translation Z",
+             "type": "textbox",
+             "value": "0: (1.75)",
+             "info": "move canvas towards/away from view [speed set by FOV]"
+         },
+         "transform_center_x": {
+             "label": "Transform Center X",
+             "type": "textbox",
+             "value": "0: (0.5)",
+             "info": "X center axis for 2D angle/zoom"
+         },
+         "transform_center_y": {
+             "label": "Transform Center Y",
+             "type": "textbox",
+             "value": "0: (0.5)",
+             "info": "Y center axis for 2D angle/zoom"
+         },
+         "rotation_3d_x": {
+             "label": "Rotation 3D X",
+             "type": "textbox",
+             "value": "0: (0)",
+             "info": "tilt canvas up/down in degrees per frame"
+         },
+         "rotation_3d_y": {
+             "label": "Rotation 3D Y",
+             "type": "textbox",
+             "value": "0: (0)",
+             "info": "pan canvas left/right in degrees per frame"
+         },
+         "rotation_3d_z": {
+             "label": "Rotation 3D Z",
+             "type": "textbox",
+             "value": "0: (0)",
+             "info": "roll canvas clockwise/anticlockwise"
+         },
+         "enable_perspective_flip": {
+             "label": "Enable perspective flip",
+             "type": "checkbox",
+             "value": False,
+             "info": ""
+         },
+         "perspective_flip_theta": {
+             "label": "Perspective flip theta",
+             "type": "textbox",
+             "value": "0: (0)",
+             "info": ""
+         },
+         "perspective_flip_phi": {
+             "label": "Perspective flip phi",
+             "type": "textbox",
+             "value": "0: (0)",
+             "info": ""
+         },
+         "perspective_flip_gamma": {
+             "label": "Perspective flip gamma",
+             "type": "textbox",
+             "value": "0: (0)",
+             "info": ""
+         },
+         "perspective_flip_fv": {
+             "label": "Perspective flip fv",
+             "type": "textbox",
+             "value": "0: (53)",
+             "info": "the 2D vanishing point of perspective (rec. range 30-160)"
+         },
+         "noise_schedule": {
+             "label": "Noise schedule",
+             "type": "textbox",
+             "value": "0: (0.065)",
+             "info": ""
+         },
+         "strength_schedule": {
+             "label": "Strength schedule",
+             "type": "textbox",
+             "value": "0: (0.65)",
+             "info": "amount of presence of previous frame to influence next frame, also controls steps in the following formula [steps - (strength_schedule * steps)]"
+         },
+         "contrast_schedule": "0: (1.0)",
+         "cfg_scale_schedule": {
+             "label": "CFG scale schedule",
+             "type": "textbox",
+             "value": "0: (7)",
+             "info": "how closely the image should conform to the prompt. Lower values produce more creative results. (recommended range 5-15)"
+         },
+         "enable_steps_scheduling": {
+             "label": "Enable steps scheduling",
+             "type": "checkbox",
+             "value": False,
+             "info": ""
+         },
+         "steps_schedule": {
+             "label": "Steps schedule",
+             "type": "textbox",
+             "value": "0: (25)",
+             "info": "mainly allows using more than 200 steps. Otherwise, it's a mirror-like param of 'strength schedule'"
+         },
+         "fov_schedule": {
+             "label": "FOV schedule",
+             "type": "textbox",
+             "value": "0: (70)",
+             "info": "adjusts the scale at which the canvas is moved in 3D by the translation_z value. [Range -180 to +180, with 0 being undefined. Values closer to 180 will make the image have less depth, while values closer to 0 will allow more depth]"
+         },
+         "aspect_ratio_schedule": {
+             "label": "Aspect Ratio schedule",
+             "type": "textbox",
+             "value": "0: (1)",
+             "info": "adjusts the aspect ratio for the depth calculations"
+         },
+         "aspect_ratio_use_old_formula": {
+             "label": "Use old aspect ratio formula",
+             "type": "checkbox",
+             "value": False,
+             "info": "for backward compatibility. Uses the formula: `width/height`"
+         },
+         "near_schedule": {
+             "label": "Near schedule",
+             "type": "textbox",
+             "value": "0: (200)",
+             "info": ""
+         },
+         "far_schedule": {
+             "label": "Far schedule",
+             "type": "textbox",
+             "value": "0: (10000)",
+             "info": ""
+         },
+         "seed_schedule": {
+             "label": "Seed schedule",
+             "type": "textbox",
+             "value": '0:(s), 1:(-1), "max_f-2":(-1), "max_f-1":(s)',
+             "info": ""
+         },
+         "pix2pix_img_cfg_scale_schedule": {
+             "label": "Pix2Pix img CFG schedule",
+             "type": "textbox",
+             "value": "0:(1.5)",
+             "info": "ONLY in use when working with a P2P ckpt!"
+         },
+         "enable_subseed_scheduling": {
+             "label": "Enable Subseed scheduling",
+             "type": "checkbox",
+             "value": False,
+             "info": ""
+         },
+         "subseed_schedule": {
+             "label": "Subseed schedule",
+             "type": "textbox",
+             "value": "0: (1)",
+             "info": ""
+         },
+         "subseed_strength_schedule": {
+             "label": "Subseed strength schedule",
+             "type": "textbox",
+             "value": "0: (0)",
+             "info": ""
+         },
+         "enable_sampler_scheduling": {
+             "label": "Enable sampler scheduling",
+             "type": "checkbox",
+             "value": False,
+             "info": ""
+         },
+         "sampler_schedule": {
+             "label": "Sampler schedule",
+             "type": "textbox",
+             "value": '0: ("Euler a")',
+             "info": "allows keyframing of samplers. Use names as they appear in ui dropdown in 'run' tab"
+         },
+         "use_noise_mask": {
+             "label": "Use noise mask",
+             "type": "checkbox",
+             "value": False,
+             "info": ""
+         },
+         "mask_schedule": {
+             "label": "Mask schedule",
+             "type": "textbox",
+             "value": '0: ("{video_mask}")',
+             "info": ""
+         },
+         "noise_mask_schedule": {
+             "label": "Noise mask schedule",
+             "type": "textbox",
+             "value": '0: ("{video_mask}")',
+             "info": ""
+         },
+         "enable_checkpoint_scheduling": {
+             "label": "Enable checkpoint scheduling",
+             "type": "checkbox",
+             "value": False,
+             "info": ""
+         },
+         "checkpoint_schedule": {
+             "label": "Checkpoint schedule",
+             "type": "textbox",
+             "value": '0: ("model1.ckpt"), 100: ("model2.safetensors")',
+             "info": "allows keyframing different sd models. Use *full* name as appears in ui dropdown"
+         },
+         "enable_clipskip_scheduling": {
+             "label": "Enable CLIP skip scheduling",
+             "type": "checkbox",
+             "value": False,
+             "info": ""
+         },
+         "clipskip_schedule": {
+             "label": "CLIP skip schedule",
+             "type": "textbox",
+             "value": "0: (2)",
+             "info": ""
+         },
+         "enable_noise_multiplier_scheduling": {
+             "label": "Enable noise multiplier scheduling",
+             "type": "checkbox",
+             "value": True,
+             "info": ""
+         },
+         "noise_multiplier_schedule": {
+             "label": "Noise multiplier schedule",
+             "type": "textbox",
+             "value": "0: (1.05)",
+             "info": ""
+         },
+         "resume_from_timestring": {
+             "label": "Resume from timestring",
+             "type": "checkbox",
+             "value": False,
+             "info": ""
+         },
+         "resume_timestring": {
+             "label": "Resume timestring",
+             "type": "textbox",
+             "value": "20230129210106",
+             "info": ""
+         },
+         "enable_ddim_eta_scheduling": {
+             "label": "Enable DDIM ETA scheduling",
+             "type": "checkbox",
+             "value": False,
+             "visible": False,
+             "info": "noise multiplier; higher = more unpredictable results"
+         },
+         "ddim_eta_schedule": {
+             "label": "DDIM ETA Schedule",
+             "type": "textbox",
+             "value": "0: (0)",
+             "visible": False,
+             "info": ""
+         },
+         "enable_ancestral_eta_scheduling": {
+             "label": "Enable Ancestral ETA scheduling",
+             "type": "checkbox",
+             "value": False,
+             "info": "noise multiplier; applies to Euler A and other samplers that have the letter 'a' in them"
+         },
+         "ancestral_eta_schedule": {
+             "label": "Ancestral ETA Schedule",
+             "type": "textbox",
+             "value": "0: (1)",
+             "visible": False,
+             "info": ""
+         },
+         "amount_schedule": {
+             "label": "Amount schedule",
+             "type": "textbox",
+             "value": "0: (0.1)",
+             "info": ""
+         },
+         "kernel_schedule": {
+             "label": "Kernel schedule",
+             "type": "textbox",
+             "value": "0: (5)",
+             "info": ""
+         },
+         "sigma_schedule": {
+             "label": "Sigma schedule",
+             "type": "textbox",
+             "value": "0: (1)",
+             "info": ""
+         },
+         "threshold_schedule": {
+             "label": "Threshold schedule",
+             "type": "textbox",
+             "value": "0: (0)",
+             "info": ""
+         },
+         "color_coherence": {
+             "label": "Color coherence",
+             "type": "dropdown",
+             "choices": ['None', 'HSV', 'LAB', 'RGB', 'Video Input', 'Image'],
+             "value": "LAB",
+             "info": "choose an algorithm/ method for keeping color coherence across the animation"
+         },
+         "color_coherence_image_path": {
+             "label": "Color coherence image path",
+             "type": "textbox",
+             "value": "",
+             "info": ""
+         },
+         "color_coherence_video_every_N_frames": {
+             "label": "Color coherence video every N frames",
+             "type": "number",
+             "precision": 0,
+             "value": 1,
+             "info": "",
+         },
+         "color_force_grayscale": {
+             "label": "Color force Grayscale",
+             "type": "checkbox",
+             "value": False,
+             "info": "force all frames to be in grayscale"
+         },
+         "legacy_colormatch": {
+             "label": "Legacy colormatch",
+             "type": "checkbox",
+             "value": False,
+             "info": "apply colormatch before adding noise (use with CN's Tile)"
+         },
+         "diffusion_cadence": {
+             "label": "Cadence",
+             "type": "slider",
+             "minimum": 1,
+             "maximum": 50,
+             "step": 1,
+             "value": 1,
+             "info": "# of in-between frames that will not be directly diffused"
+         },
+         "optical_flow_cadence": {
+             "label": "Optical flow cadence",
+             "type": "dropdown",
+             "choices": ['None', 'RAFT', 'DIS Medium', 'DIS Fine', 'Farneback'],
+             "value": "None",
+             "info": "use optical flow estimation for your in-between (cadence) frames"
+         },
+         "cadence_flow_factor_schedule": {
+             "label": "Cadence flow factor schedule",
+             "type": "textbox",
+             "value": "0: (1)",
+             "info": ""
+         },
+         "optical_flow_redo_generation": {
+             "label": "Optical flow generation",
+             "type": "dropdown",
+             "choices": ['None', 'RAFT', 'DIS Medium', 'DIS Fine', 'Farneback'],
+             "value": "None",
+             "info": "this option takes twice as long because it generates twice in order to capture the optical flow from the previous image to the first generation, then warps the previous image and redoes the generation"
+         },
+         "redo_flow_factor_schedule": {
+             "label": "Generation flow factor schedule",
+             "type": "textbox",
+             "value": "0: (1)",
+             "info": ""
+         },
+         "diffusion_redo": '0',
+         "noise_type": {
+             "label": "Noise type",
+             "type": "radio",
+             "choices": ['uniform', 'perlin'],
+             "value": "perlin",
+             "info": ""
+         },
+         "perlin_w": {
+             "label": "Perlin W",
+             "type": "slider",
+             "minimum": 0.1,
+             "maximum": 16,
+             "step": 0.1,
+             "value": 8,
+             "visible": False
+         },
+         "perlin_h": {
+             "label": "Perlin H",
+             "type": "slider",
+             "minimum": 0.1,
+             "maximum": 16,
+             "step": 0.1,
+             "value": 8,
+             "visible": False
+         },
+         "perlin_octaves": {
+             "label": "Perlin octaves",
+             "type": "slider",
+             "minimum": 1,
+             "maximum": 7,
+             "step": 1,
+             "value": 4
+         },
+         "perlin_persistence": {
+             "label": "Perlin persistence",
+             "type": "slider",
+             "minimum": 0,
+             "maximum": 1,
+             "step": 0.02,
+             "value": 0.5
+         },
+         "use_depth_warping": {
+             "label": "Use depth warping",
+             "type": "checkbox",
+             "value": True,
+             "info": ""
+         },
+         "depth_algorithm": {
+             "label": "Depth Algorithm",
+             "type": "dropdown",
+             "choices": ['Midas+AdaBins (old)', 'Zoe+AdaBins (old)', 'Midas-3-Hybrid', 'AdaBins', 'Zoe', 'Leres'],
+             "value": "Midas-3-Hybrid",
+             "info": "choose an algorithm/ method for estimating the depth maps used for 3D warping"
+         },
+         "midas_weight": {
+             "label": "MiDaS/Zoe weight",
+             "type": "number",
+             "precision": None,
+             "value": 0.2,
+             "info": "sets a midpoint at which a depth-map is to be drawn: range [-1 to +1]",
+             "visible": False
+         },
+         "padding_mode": {
+             "label": "Padding mode",
+             "type": "radio",
+             "choices": ['border', 'reflection', 'zeros'],
+             "value": "border",
+             "info": "controls the handling of pixels outside the field of view as they come into the scene"
+         },
+         "sampling_mode": {
+             "label": "Sampling mode",
+             "type": "radio",
+             "choices": ['bicubic', 'bilinear', 'nearest'],
+             "value": "bicubic",
+             "info": ""
+         },
+         "save_depth_maps": {
+             "label": "Save 3D depth maps",
+             "type": "checkbox",
+             "value": False,
+             "info": "save animation's depth maps as extra files"
+         },
+         "video_init_path": {
+             "label": "Video init path/ URL",
+             "type": "textbox",
+             "value": 'https://deforum.github.io/a1/V1.mp4',
+             "info": ""
+         },
+         "extract_nth_frame": {
+             "label": "Extract nth frame",
+             "type": "number",
+             "precision": 0,
+             "value": 1,
+             "info": ""
+         },
+         "extract_from_frame": {
+             "label": "Extract from frame",
+             "type": "number",
+             "precision": 0,
+             "value": 0,
+             "info": ""
+         },
+         "extract_to_frame": {
+             "label": "Extract to frame",
+             "type": "number",
+             "precision": 0,
+             "value": -1,
+             "info": ""
+         },
+         "overwrite_extracted_frames": {
+             "label": "Overwrite extracted frames",
+             "type": "checkbox",
+             "value": False,
+             "info": ""
+         },
+         "use_mask_video": {
+             "label": "Use mask video",
+             "type": "checkbox",
+             "value": False,
+             "info": ""
+         },
+         "video_mask_path": {
+             "label": "Video mask path",
+             "type": "textbox",
+             "value": 'https://deforum.github.io/a1/VM1.mp4',
+             "info": ""
+         },
+         "hybrid_comp_alpha_schedule": {
+             "label": "Comp alpha schedule",
+             "type": "textbox",
+             "value": "0:(0.5)",
+             "info": ""
+         },
+         "hybrid_comp_mask_blend_alpha_schedule": {
+             "label": "Comp mask blend alpha schedule",
+             "type": "textbox",
+             "value": "0:(0.5)",
+             "info": ""
+         },
+         "hybrid_comp_mask_contrast_schedule": {
+             "label": "Comp mask contrast schedule",
+             "type": "textbox",
+             "value": "0:(1)",
+             "info": ""
+         },
+         "hybrid_comp_mask_auto_contrast_cutoff_high_schedule": {
+             "label": "Comp mask auto contrast cutoff high schedule",
+             "type": "textbox",
+             "value": "0:(100)",
+             "info": ""
+         },
+         "hybrid_comp_mask_auto_contrast_cutoff_low_schedule": {
+             "label": "Comp mask auto contrast cutoff low schedule",
+             "type": "textbox",
+             "value": "0:(0)",
+             "info": ""
+         },
+         "hybrid_flow_factor_schedule": {
+             "label": "Flow factor schedule",
+             "type": "textbox",
+             "value": "0:(1)",
+             "info": ""
+         },
+         "hybrid_generate_inputframes": {
+             "label": "Generate inputframes",
+             "type": "checkbox",
+             "value": False,
+             "info": ""
+         },
+         "hybrid_generate_human_masks": {
+             "label": "Generate human masks",
+             "type": "radio",
+             "choices": ['None', 'PNGs', 'Video', 'Both'],
+             "value": "None",
+             "info": ""
+         },
+         "hybrid_use_first_frame_as_init_image": {
+             "label": "First frame as init image",
+             "type": "checkbox",
+             "value": True,
+             "info": "",
+             "visible": False
+         },
+         "hybrid_motion": {
+             "label": "Hybrid motion",
+             "type": "radio",
+             "choices": ['None', 'Optical Flow', 'Perspective', 'Affine'],
+             "value": "None",
+             "info": ""
+         },
+         "hybrid_motion_use_prev_img": {
+             "label": "Motion use prev img",
+             "type": "checkbox",
+             "value": False,
+             "info": "",
+             "visible": False
+         },
+         "hybrid_flow_consistency": {
+             "label": "Flow consistency mask",
+             "type": "checkbox",
+             "value": False,
+             "info": "",
+             "visible": False
+         },
+         "hybrid_consistency_blur": {
+             "label": "Consistency mask blur",
+             "type": "slider",
+             "minimum": 0,
+             "maximum": 16,
+             "step": 1,
+             "value": 2,
+             "visible": False
+         },
+         "hybrid_flow_method": {
+             "label": "Flow method",
+             "type": "radio",
+             "choices": ['RAFT', 'DIS Medium', 'DIS Fine', 'Farneback'],
+             "value": "RAFT",
+             "info": "",
+             "visible": False
+         },
+         "hybrid_composite": 'None', # ['None', 'Normal', 'Before Motion', 'After Generation']
+         "hybrid_use_init_image": {
+             "label": "Use init image as video",
+             "type": "checkbox",
+             "value": False,
+             "info": "",
+         },
+         "hybrid_comp_mask_type": {
+             "label": "Comp mask type",
+             "type": "radio",
+             "choices": ['None', 'Depth', 'Video Depth', 'Blend', 'Difference'],
+             "value": "None",
+             "info": "",
+             "visible": False
+         },
+         "hybrid_comp_mask_inverse": False,
+         "hybrid_comp_mask_equalize": {
+             "label": "Comp mask equalize",
+             "type": "radio",
+             "choices": ['None', 'Before', 'After', 'Both'],
+             "value": "None",
+             "info": "",
+         },
+         "hybrid_comp_mask_auto_contrast": False,
+         "hybrid_comp_save_extra_frames": False
+     }
+
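Nearly every textbox default above is a keyframe schedule in the `frame: (value-or-expression)` format parsed by animation_key_frames.py: frames in between keys are interpolated, and expressions may use `t` (the current frame) plus `max_f` and `s` (seed). Illustrative values, not defaults from this file:

```python
# ramp denoise strength down over 60 frames, then hold
strength_schedule = "0: (0.65), 60: (0.45)"
# an oscillating zoom like the default above, driven by the frame index t
zoom = "0: (1.0025+0.002*sin(1.25*3.14*t/30))"
```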
+ def DeforumArgs():
+     return {
+         "W": {
+             "label": "Width",
+             "type": "slider",
+             "minimum": 64,
+             "maximum": 2048,
+             "step": 64,
+             "value": 512,
+         },
+         "H": {
+             "label": "Height",
+             "type": "slider",
+             "minimum": 64,
+             "maximum": 2048,
+             "step": 64,
+             "value": 512,
+         },
+         "show_info_on_ui": True,
+         "tiling": {
+             "label": "Tiling",
+             "type": "checkbox",
+             "value": False,
+             "info": "enable for seamless-tiling of each generated image. Experimental"
+         },
+         "restore_faces": {
+             "label": "Restore faces",
+             "type": "checkbox",
+             "value": False,
+             "info": "enable to trigger webui's face restoration on each frame during the generation"
+         },
+         "seed_resize_from_w": {
+             "label": "Resize seed from width",
+             "type": "slider",
+             "minimum": 0,
+             "maximum": 2048,
+             "step": 64,
+             "value": 0,
+         },
+         "seed_resize_from_h": {
+             "label": "Resize seed from height",
+             "type": "slider",
+             "minimum": 0,
+             "maximum": 2048,
+             "step": 64,
+             "value": 0,
+         },
+         "seed": {
+             "label": "Seed",
+             "type": "number",
+             "precision": 0,
+             "value": -1,
+             "info": "Starting seed for the animation. -1 for random"
+         },
+         "sampler": {
+             "label": "Sampler",
+             "type": "dropdown",
+             "choices": get_samplers_list().values(),
+             "value": "Euler a",
+         },
+         "steps": {
+             "label": "Steps",
+             "type": "slider",
+             "minimum": 1,
+             "maximum": 200,
+             "step": 1,
+             "value": 25,
+         },
+         "batch_name": {
+             "label": "Batch name",
+             "type": "textbox",
+             "value": "Deforum_{timestring}",
+             "info": "output images will be placed in a folder with this name ({timestring} token will be replaced) inside the img2img output folder. Supports params placeholders. e.g {seed}, {w}, {h}, {prompts}"
+         },
+         "seed_behavior": {
+             "label": "Seed behavior",
+             "type": "radio",
+             "choices": ['iter', 'fixed', 'random', 'ladder', 'alternate', 'schedule'],
+             "value": "iter",
+             "info": "controls the seed behavior that is used for animation. Hover on the options to see more info"
+         },
+         "seed_iter_N": {
+             "label": "Seed iter N",
+             "type": "number",
+             "precision": 0,
+             "value": 1,
+             "info": "for how many frames the same seed should stick before iterating to the next one"
+         },
+         "use_init": {
+             "label": "Use init",
+             "type": "checkbox",
+             "value": False,
+             "info": ""
+         },
+         "strength": {
+             "label": "Strength",
+             "type": "slider",
+             "minimum": 0,
+             "maximum": 1,
+             "step": 0.01,
+             "value": 0.8,
+         },
+         "strength_0_no_init": {
+             "label": "Strength 0 no init",
+             "type": "checkbox",
+             "value": True,
+             "info": ""
+         },
+         "init_image": {
+             "label": "Init image URL",
+             "type": "textbox",
+             "value": "https://deforum.github.io/a1/I1.png",
+             "info": "Use web address or local path. Note: if the image box below is used then this field is ignored."
+         },
+         "init_image_box": {
+             "label": "Init image box",
+             "type": "image",
+             "type_param": "pil",
+             "source": "upload",
+             "interactive": True,
+             "info": ""
+         },
+         "use_mask": {
+             "label": "Use mask",
+             "type": "checkbox",
+             "value": False,
+             "info": ""
+         },
+         "use_alpha_as_mask": {
+             "label": "Use alpha as mask",
+             "type": "checkbox",
+             "value": False,
+             "info": ""
+         },
+         "mask_file": {
+             "label": "Mask file",
+             "type": "textbox",
+             "value": "https://deforum.github.io/a1/M1.jpg",
+             "info": ""
+         },
+         "invert_mask": {
+             "label": "Invert mask",
+             "type": "checkbox",
+             "value": False,
+             "info": ""
+         },
+         "mask_contrast_adjust": {
+             "label": "Mask contrast adjust",
+             "type": "number",
+             "precision": None,
+             "value": 1.0,
+             "info": ""
+         },
+         "mask_brightness_adjust": {
+             "label": "Mask brightness adjust",
+             "type": "number",
+             "precision": None,
+             "value": 1.0,
+             "info": ""
+         },
+         "overlay_mask": {
+             "label": "Overlay mask",
+             "type": "checkbox",
+             "value": True,
+             "info": ""
+         },
+         "mask_overlay_blur": {
+             "label": "Mask overlay blur",
+             "type": "slider",
+             "minimum": 0,
+             "maximum": 64,
+             "step": 1,
+             "value": 4,
+         },
+         "fill": {
+             "label": "Mask fill",
+             "type": "radio",
+             "type_param": "index",
+             "choices": ['fill', 'original', 'latent noise', 'latent nothing'],
+             "value": 'original',
+             "info": ""
+         },
+         "full_res_mask": {
+             "label": "Full res mask",
+             "type": "checkbox",
+             "value": True,
+             "info": ""
+         },
+         "full_res_mask_padding": {
+             "label": "Full res mask padding",
+             "type": "slider",
+             "minimum": 0,
+             "maximum": 512,
+             "step": 1,
+             "value": 4,
+         },
+         "reroll_blank_frames": {
+             "label": "Reroll blank frames",
+             "type": "radio",
+             "choices": ['reroll', 'interrupt', 'ignore'],
+             "value": "ignore",
+             "info": ""
+         },
+         "reroll_patience": {
+             "label": "Reroll patience",
+             "type": "number",
+             "precision": None,
+             "value": 10,
+             "info": ""
+         },
+         "motion_preview_mode": {
+             "label": "Motion preview mode (dry run).",
+             "type": "checkbox",
+             "value": False,
+             "info": "Preview motion only. Uses a static picture for init, and draws a motion reference rectangle."
+         },
+     }
+
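These metadata dicts look like they are consumed by UI-building code elsewhere in the extension. A hedged sketch of how one slider entry could be turned into a Gradio component (`gr.Slider` is real Gradio; the `build_slider` wiring is illustrative, not the extension's actual UI code):

```python
import gradio as gr

def build_slider(name, meta):
    # meta is one entry from DeforumArgs(), e.g. DeforumArgs()["W"]
    return gr.Slider(minimum=meta["minimum"], maximum=meta["maximum"],
                     step=meta["step"], value=meta["value"],
                     label=meta.get("label", name))

w_slider = build_slider("W", DeforumArgs()["W"])  # Width slider, 64..2048 step 64
```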
+ def LoopArgs():
+     return {
+         "use_looper": {
+             "label": "Enable guided images mode",
+             "type": "checkbox",
+             "value": False,
+         },
+         "init_images": {
+             "label": "Images to use for keyframe guidance",
+             "type": "textbox",
+             "lines": 9,
+             "value": get_guided_imgs_default_json(),
+         },
+         "image_strength_schedule": {
+             "label": "Image strength schedule",
+             "type": "textbox",
+             "value": "0:(0.75)",
+         },
+         "blendFactorMax": {
+             "label": "Blend factor max",
+             "type": "textbox",
+             "value": "0:(0.35)",
+         },
+         "blendFactorSlope": {
+             "label": "Blend factor slope",
+             "type": "textbox",
+             "value": "0:(0.25)",
+         },
+         "tweening_frames_schedule": {
+             "label": "Tweening frames schedule",
+             "type": "textbox",
+             "value": "0:(20)",
+         },
+         "color_correction_factor": {
+             "label": "Color correction factor",
+             "type": "textbox",
+             "value": "0:(0.075)",
+         }
+     }
+
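The `init_images` default comes from `get_guided_imgs_default_json()` in defaults.py; its exact contents aren't shown in this diff, but the expected shape is a JSON object mapping frame numbers to image paths or URLs. An illustrative (hypothetical) value:

```python
init_images = """{
    "0": "https://deforum.github.io/a1/Gi1.png",
    "50": "https://deforum.github.io/a1/Gi2.png"
}"""
```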
+ def ParseqArgs():
+     return {
+         "parseq_manifest": {
+             "label": "Parseq Manifest (JSON or URL)",
+             "type": "textbox",
+             "lines": 4,
+             "value": None,
+         },
+         "parseq_use_deltas": {
+             "label": "Use delta values for movement parameters",
+             "type": "checkbox",
+             "value": True,
+         }
+     }
+
+ def DeforumOutputArgs():
+     return {
+         "skip_video_creation": {
+             "label": "Skip video creation",
+             "type": "checkbox",
+             "value": False,
+             "info": "If enabled, only images will be saved"
+         },
+         "fps": {
+             "label": "FPS",
+             "type": "slider",
+             "minimum": 1,
+             "maximum": 240,
+             "step": 1,
+             "value": 15,
+         },
+         "make_gif": {
+             "label": "Make GIF",
+             "type": "checkbox",
+             "value": False,
+             "info": "make GIF in addition to the video/s"
+         },
+         "delete_imgs": {
+             "label": "Delete Imgs",
+             "type": "checkbox",
+             "value": False,
+             "info": "auto-delete imgs when video is ready. Will break Resume from timestring!"
+         },
+         "delete_input_frames": {
+             "label": "Delete All Inputframes",
+             "type": "checkbox",
+             "value": False,
+             "info": "auto-delete inputframes (incl CN ones) when video is ready"
+         },
+         "image_path": {
+             "label": "Image path",
+             "type": "textbox",
+             "value": "C:/SD/20230124234916_%09d.png",
+         },
+         "add_soundtrack": {
+             "label": "Add soundtrack",
+             "type": "radio",
+             "choices": ['None', 'File', 'Init Video'],
+             "value": "None",
+             "info": "add audio to video from file/url or init video"
+         },
+         "soundtrack_path": {
+             "label": "Soundtrack path",
+             "type": "textbox",
+             "value": "https://deforum.github.io/a1/A1.mp3",
+             "info": "abs. path or url to audio file"
+         },
+         "r_upscale_video": {
+             "label": "Upscale",
+             "type": "checkbox",
+             "value": False,
+             "info": "upscale output imgs when run is finished"
+         },
+         "r_upscale_factor": {
+             "label": "Upscale factor",
+             "type": "dropdown",
+             "choices": ['x2', 'x3', 'x4'],
+             "value": "x2",
+         },
+         "r_upscale_model": {
+             "label": "Upscale model",
+             "type": "dropdown",
+             "choices": ['realesr-animevideov3', 'realesrgan-x4plus', 'realesrgan-x4plus-anime'],
+             "value": 'realesr-animevideov3',
+         },
+         "r_upscale_keep_imgs": {
+             "label": "Keep Imgs",
+             "type": "checkbox",
+             "value": True,
+             "info": "don't delete upscaled imgs",
+         },
+         "store_frames_in_ram": {
+             "label": "Store frames in ram",
+             "type": "checkbox",
+             "value": False,
+             "info": "store frames in RAM instead of on disk until the video is ready",
+             "visible": False
+         },
+         "frame_interpolation_engine": {
+             "label": "Engine",
+             "type": "radio",
+             "choices": ['None', 'RIFE v4.6', 'FILM'],
+             "value": "None",
+             "info": "select the frame interpolation engine. hover on the options for more info"
+         },
+         "frame_interpolation_x_amount": {
+             "label": "Interp X",
+             "type": "slider",
+             "minimum": 2,
+             "maximum": 10,
+             "step": 1,
+             "value": 2,
+         },
+         "frame_interpolation_slow_mo_enabled": {
+             "label": "Slow-Mo",
+             "type": "checkbox",
+             "value": False,
+             "visible": False,
+             "info": "Slow-Mo the interpolated video, audio will not be used if enabled",
+         },
+         "frame_interpolation_slow_mo_amount": {
+             "label": "Slow-Mo X",
+             "type": "slider",
+             "minimum": 2,
+             "maximum": 10,
+             "step": 1,
+             "value": 2,
+         },
+         "frame_interpolation_keep_imgs": {
+             "label": "Keep Imgs",
+             "type": "checkbox",
+             "value": False,
+             "info": "Keep interpolated images on disk",
+             "visible": False
+         },
+         "frame_interpolation_use_upscaled": {
+             "label": "Use Upscaled",
+             "type": "checkbox",
+             "value": False,
+             "info": "Interpolate upscaled images, if available",
+             "visible": False
+         },
+
+     }
+
+ def get_component_names():
+     return ['override_settings_with_file', 'custom_settings_file', *DeforumAnimArgs().keys(), 'animation_prompts', 'animation_prompts_positive', 'animation_prompts_negative',
+             *DeforumArgs().keys(), *DeforumOutputArgs().keys(), *ParseqArgs().keys(), *LoopArgs().keys(), *controlnet_component_names()]
+
+ def get_settings_component_names():
+     return [name for name in get_component_names()]
+
+ def pack_args(args_dict, keys_function):
+     return {name: args_dict[name] for name in keys_function()}
+
+ def process_args(args_dict_main, run_id):
+     from .settings import load_args
+     override_settings_with_file = args_dict_main['override_settings_with_file']
+     custom_settings_file = args_dict_main['custom_settings_file']
+     p = args_dict_main['p']
+
+     root = SimpleNamespace(**RootArgs())
+     args = SimpleNamespace(**{name: args_dict_main[name] for name in DeforumArgs()})
+     anim_args = SimpleNamespace(**{name: args_dict_main[name] for name in DeforumAnimArgs()})
+     video_args = SimpleNamespace(**{name: args_dict_main[name] for name in DeforumOutputArgs()})
+     parseq_args = SimpleNamespace(**{name: args_dict_main[name] for name in ParseqArgs()})
+     loop_args = SimpleNamespace(**{name: args_dict_main[name] for name in LoopArgs()})
+     controlnet_args = SimpleNamespace(**{name: args_dict_main[name] for name in controlnet_component_names()})
+
+     root.animation_prompts = json.loads(args_dict_main['animation_prompts'])
+
+     args_loaded_ok = True
+     if override_settings_with_file:
+         args_loaded_ok = load_args(args_dict_main, args, anim_args, parseq_args, loop_args, controlnet_args, video_args, custom_settings_file, root, run_id)
+
+     positive_prompts = args_dict_main['animation_prompts_positive']
+     negative_prompts = args_dict_main['animation_prompts_negative']
+     negative_prompts = negative_prompts.replace('--neg', '') # remove --neg from negative_prompts if received by mistake
+     root.animation_prompts = {key: f"{positive_prompts} {val} {'' if '--neg' in val else '--neg'} {negative_prompts}" for key, val in root.animation_prompts.items()}
+
+     if args.seed == -1:
+         root.raw_seed = -1
+     args.seed = get_fixed_seed(args.seed)
+     if root.raw_seed != -1:
+         root.raw_seed = args.seed
+     root.timestring = time.strftime('%Y%m%d%H%M%S')
+     args.strength = max(0.0, min(1.0, args.strength))
+     args.prompts = json.loads(args_dict_main['animation_prompts'])
+     args.positive_prompts = args_dict_main['animation_prompts_positive']
+     args.negative_prompts = args_dict_main['animation_prompts_negative']
+
+     if not args.use_init and not anim_args.hybrid_use_init_image:
+         args.init_image = None
+         args.init_image_box = None
+
+     elif anim_args.animation_mode == 'Video Input':
+         args.use_init = True
+
+     current_arg_list = [args, anim_args, video_args, parseq_args, root]
+     full_base_folder_path = os.path.join(os.getcwd(), p.outpath_samples)
+     root.raw_batch_name = args.batch_name
+     args.batch_name = substitute_placeholders(args.batch_name, current_arg_list, full_base_folder_path)
+     args.outdir = os.path.join(p.outpath_samples, str(args.batch_name))
+     args.outdir = os.path.join(os.getcwd(), args.outdir)
+     args.outdir = os.path.realpath(args.outdir)
+     os.makedirs(args.outdir, exist_ok=True)
+
+     default_img = Image.open(os.path.join(pathlib.Path(__file__).parent.absolute(), '114763196.jpg'))
+     assert default_img is not None
+     default_img = default_img.resize((args.W, args.H))
+     root.default_img = default_img
+
+     return args_loaded_ok, root, args, anim_args, video_args, parseq_args, loop_args, controlnet_args
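`process_args` above flattens the UI metadata into plain namespaces. A minimal standalone sketch of that collapse, using only the functions defined in this file (`to_defaults` is a hypothetical helper, not part of the extension):

```python
from types import SimpleNamespace

def to_defaults(component_dict):
    # keep plain values as-is; unwrap {"label": ..., "value": ...} metadata
    # dicts (entries without a "value" key, like init_image_box, stay as-is)
    return {k: (v["value"] if isinstance(v, dict) and "value" in v else v)
            for k, v in component_dict.items()}

args = SimpleNamespace(**to_defaults(DeforumArgs()))
anim_args = SimpleNamespace(**to_defaults(DeforumAnimArgs()))
print(args.W, args.H, anim_args.max_frames)  # 512 512 120
```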
extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/auto_navigation.py ADDED
@@ -0,0 +1,88 @@
+ # Copyright (C) 2023 Deforum LLC
+ #
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU Affero General Public License as published by
+ # the Free Software Foundation, version 3 of the License.
+ #
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ # GNU General Public License for more details.
+ #
+ # You should have received a copy of the GNU Affero General Public License
+ # along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+ # Contact the authors: https://deforum.github.io/
+
+ import numpy as np
+ import torch
+
+ # reallybigname - auto-navigation functions in progress...
+ # usage:
+ # if auto_rotation:
+ #     rot_mat = rotate_camera_towards_depth(depth_tensor, auto_rotation_steps, w, h, fov_deg, auto_rotation_depth_target)
+ def rotate_camera_towards_depth(depth_tensor, turn_weight, width, height, h_fov=60, target_depth=1):
+     # Find the deepest point along the row at the requested (normalized) target depth
+     target_depth_index = int(target_depth * depth_tensor.shape[0])
+     target_depth_values = depth_tensor[target_depth_index]
+     max_depth_index = torch.argmax(target_depth_values).item()
+     max_depth_index = (max_depth_index, target_depth_index)
+     max_depth = target_depth_values[max_depth_index[0]].item()
+
+     # Compute the normalized x and y coordinates
+     x, y = max_depth_index
+     x_normalized = (x / (width - 1)) * 2 - 1
+     y_normalized = (y / (height - 1)) * 2 - 1
+
+     # Calculate horizontal and vertical field of view (in radians)
+     h_fov_rad = np.radians(h_fov)
+     aspect_ratio = width / height
+     v_fov_rad = h_fov_rad / aspect_ratio
+
+     # Calculate the world coordinates (x, y) at the target depth
+     x_world = np.tan(h_fov_rad / 2) * max_depth * x_normalized
+     y_world = np.tan(v_fov_rad / 2) * max_depth * y_normalized
+
+     # Compute the target position using the world coordinates and max_depth
+     target_position = np.array([x_world, y_world, max_depth])
+
+     # Assuming the camera is initially at the origin, and looking in the negative Z direction
+     cam_position = np.array([0, 0, 0])
+     current_direction = np.array([0, 0, -1])
+
+     # Compute the direction vector and normalize it
+     direction = target_position - cam_position
+     direction = direction / np.linalg.norm(direction)
+
+     # The rotation axis is the cross product of the two unit directions; its norm
+     # equals sin(angle), so the angle must be taken *before* normalizing the axis
+     axis = np.cross(current_direction, direction)
+     angle = np.arcsin(np.linalg.norm(axis))
+     axis = axis / np.linalg.norm(axis)
+
+     # Spread the rotation over turn_weight frames, limiting the per-frame step
+     max_angle = np.pi * (0.1 / turn_weight)  # Limit the maximum rotation angle to half of the visible screen
+     rotation_angle = np.clip(angle / turn_weight, -max_angle, max_angle)
+
+     # Compute the rotation matrix (Rodrigues' formula)
+     rotation_matrix = np.eye(3) + np.sin(rotation_angle) * np.array([
+         [0, -axis[2], axis[1]],
+         [axis[2], 0, -axis[0]],
+         [-axis[1], axis[0], 0]
+     ]) + (1 - np.cos(rotation_angle)) * np.outer(axis, axis)
+
+     # Convert the NumPy array to a PyTorch tensor
+     rotation_matrix_tensor = torch.from_numpy(rotation_matrix).float()
+
+     # Add an extra dimension to match the expected shape (1, 3, 3)
+     rotation_matrix_tensor = rotation_matrix_tensor.unsqueeze(0)
+
+     return rotation_matrix_tensor
+
+ def rotation_matrix(axis, angle):
+     # Quaternion-derived matrix for a rotation of `angle` radians about `axis`
+     axis = np.asarray(axis)
+     axis = axis / np.linalg.norm(axis)
+     a = np.cos(angle / 2.0)
+     b, c, d = -axis * np.sin(angle / 2.0)
+     aa, bb, cc, dd = a * a, b * b, c * c, d * d
+     bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
+     return np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],
+                      [2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],
+                      [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])
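A quick sanity check for `rotation_matrix` (a hypothetical snippet, not part of the file): a quarter turn about Z should map the X axis onto the Y axis.

    import numpy as np

    R = rotation_matrix([0, 0, 1], np.pi / 2)   # 90 degrees about the Z axis
    v = R @ np.array([1.0, 0.0, 0.0])
    assert np.allclose(v, [0.0, 1.0, 0.0])      # X axis lands on the Y axis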
extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/colors.py ADDED
@@ -0,0 +1,36 @@
+ # Copyright (C) 2023 Deforum LLC
+ #
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU Affero General Public License as published by
+ # the Free Software Foundation, version 3 of the License.
+ #
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ # GNU General Public License for more details.
+ #
+ # You should have received a copy of the GNU Affero General Public License
+ # along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+ # Contact the authors: https://deforum.github.io/
+
+ import cv2
+ from skimage.exposure import match_histograms
+
+ def maintain_colors(prev_img, color_match_sample, mode):
+     match_histograms_kwargs = {'channel_axis': -1}
+
+     if mode == 'RGB':
+         return match_histograms(prev_img, color_match_sample, **match_histograms_kwargs)
+     elif mode == 'HSV':
+         prev_img_hsv = cv2.cvtColor(prev_img, cv2.COLOR_RGB2HSV)
+         color_match_hsv = cv2.cvtColor(color_match_sample, cv2.COLOR_RGB2HSV)
+         matched_hsv = match_histograms(prev_img_hsv, color_match_hsv, **match_histograms_kwargs)
+         return cv2.cvtColor(matched_hsv, cv2.COLOR_HSV2RGB)
+     else:  # LAB
+         prev_img_lab = cv2.cvtColor(prev_img, cv2.COLOR_RGB2LAB)
+         color_match_lab = cv2.cvtColor(color_match_sample, cv2.COLOR_RGB2LAB)
+         matched_lab = match_histograms(prev_img_lab, color_match_lab, **match_histograms_kwargs)
+         return cv2.cvtColor(matched_lab, cv2.COLOR_LAB2RGB)
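A hypothetical call, assuming two RGB uint8 frames of identical shape, as the render loop provides:

    import numpy as np

    first_frame = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)  # color anchor
    new_frame = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)    # freshly rendered frame
    stabilized = maintain_colors(new_frame, first_frame, mode='RGB')
    assert stabilized.shape == new_frame.shape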
extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/composable_masks.py ADDED
@@ -0,0 +1,212 @@
+ # Copyright (C) 2023 Deforum LLC
+ #
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU Affero General Public License as published by
+ # the Free Software Foundation, version 3 of the License.
+ #
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ # GNU General Public License for more details.
+ #
+ # You should have received a copy of the GNU Affero General Public License
+ # along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+ # Contact the authors: https://deforum.github.io/
+
+ # At the moment there are three types of masks: mask from variable, file mask and word mask
+ # Variable masks include video_mask (which can be set to auto-generated human masks) and everywhere
+ # They are put in {}-brackets
+ # Word masks are framed with <>-brackets, like: <cat>, <anime girl>
+ # File masks are put in []-brackets
+ # Empty strings are counted as the whole frame
+ # We want to put them all into a sequence of boolean operations
+
+ # Example:
+ # \ <armor>
+ # (({human_mask} & [mask1.png]) ^ <apple>)
+
+ # Writing the parser for the boolean sequence
+ # using regex and PIL operations
+ import re
+ from .load_images import get_mask_from_file, check_mask_for_errors, blank_if_none
+ from .word_masking import get_word_mask
+ from PIL import ImageChops
+ from modules.shared import opts
+
+ # val_masks: name, PIL Image mask
+ # Returns an image in mode '1' (needed for bool ops), convert to 'L' in the sender function
+ def compose_mask(root, args, mask_seq, val_masks, frame_image, inner_idx: int = 0):
+     # Compose_mask recursively: go to inner brackets, then b-op it and go upstack
+
+     # Step 1:
+     # recursive parenthesis pass
+     # regex is not powerful here
+     seq = ""
+     inner_seq = ""
+     parentheses_counter = 0
+
+     for c in mask_seq:
+         if c == ')':
+             parentheses_counter = parentheses_counter - 1
+         if parentheses_counter > 0:
+             inner_seq += c
+         if c == '(':
+             parentheses_counter = parentheses_counter + 1
+         if parentheses_counter == 0:
+             if len(inner_seq) > 0:
+                 inner_idx += 1
+                 seq += compose_mask(root, args, inner_seq, val_masks, frame_image, inner_idx)
+                 inner_seq = ""
+             else:
+                 seq += c
+
+     if parentheses_counter != 0:
+         raise Exception(f'Mismatched parentheses in {mask_seq}!')
+
+     mask_seq = seq
+
+     # Step 2:
+     # Load the word masks and file masks as vars
+
+     # File masks
+     pattern = r'\[(?P<inner>[\S\s]*?)\]'
+
+     def parse(match_object):
+         nonlocal inner_idx
+         inner_idx += 1
+         content = match_object.groupdict()['inner']
+         val_masks[str(inner_idx)] = get_mask_from_file(content, args).convert('1')  # TODO: add caching
+         return f"{{{inner_idx}}}"
+
+     mask_seq = re.sub(pattern, parse, mask_seq)
+
+     # Word masks
+     pattern = r'<(?P<inner>[\S\s]*?)>'
+
+     def parse(match_object):
+         nonlocal inner_idx
+         inner_idx += 1
+         content = match_object.groupdict()['inner']
+         val_masks[str(inner_idx)] = get_word_mask(root, frame_image, content).convert('1')
+         return f"{{{inner_idx}}}"
+
+     mask_seq = re.sub(pattern, parse, mask_seq)
+
+     # Now that all inner parentheses are eliminated we're left with a linear string
+
+     # Step 3:
+     # Boolean operations with masks
+     # Operators: invert !, and &, or |, xor ^, difference \
+
+     # Invert vars with '!'
+     pattern = r'![\S\s]*{(?P<inner>[\S\s]*?)}'
+
+     def parse(match_object):
+         nonlocal inner_idx
+         inner_idx += 1
+         content = match_object.groupdict()['inner']
+         savename = content
+         if content in root.mask_preset_names:
+             inner_idx += 1
+             savename = str(inner_idx)
+         val_masks[savename] = ImageChops.invert(val_masks[content])
+         return f"{{{savename}}}"
+
+     mask_seq = re.sub(pattern, parse, mask_seq)
+
+     # Multiply neighbouring vars with '&'
+     # Wait for replacements to stall (like in Markov chains)
+     while True:
+         pattern = r'{(?P<inner1>[\S\s]*?)}[\s]*&[\s]*{(?P<inner2>[\S\s]*?)}'
+
+         def parse(match_object):
+             nonlocal inner_idx
+             inner_idx += 1
+             content = match_object.groupdict()['inner1']
+             content_second = match_object.groupdict()['inner2']
+             savename = content
+             if content in root.mask_preset_names:
+                 inner_idx += 1
+                 savename = str(inner_idx)
+             val_masks[savename] = ImageChops.logical_and(val_masks[content], val_masks[content_second])
+             return f"{{{savename}}}"
+
+         prev_mask_seq = mask_seq
+         mask_seq = re.sub(pattern, parse, mask_seq)
+         if mask_seq == prev_mask_seq:
+             break
+
+     # Add neighbouring vars with '|'
+     while True:
+         pattern = r'{(?P<inner1>[\S\s]*?)}[\s]*?\|[\s]*?{(?P<inner2>[\S\s]*?)}'
+
+         def parse(match_object):
+             nonlocal inner_idx
+             inner_idx += 1
+             content = match_object.groupdict()['inner1']
+             content_second = match_object.groupdict()['inner2']
+             savename = content
+             if content in root.mask_preset_names:
+                 inner_idx += 1
+                 savename = str(inner_idx)
+             val_masks[savename] = ImageChops.logical_or(val_masks[content], val_masks[content_second])
+             return f"{{{savename}}}"
+
+         prev_mask_seq = mask_seq
+         mask_seq = re.sub(pattern, parse, mask_seq)
+         if mask_seq == prev_mask_seq:
+             break
+
+     # Mutually exclude neighbouring vars with '^'
+     while True:
+         pattern = r'{(?P<inner1>[\S\s]*?)}[\s]*\^[\s]*{(?P<inner2>[\S\s]*?)}'
+
+         def parse(match_object):
+             nonlocal inner_idx
+             inner_idx += 1
+             content = match_object.groupdict()['inner1']
+             content_second = match_object.groupdict()['inner2']
+             savename = content
+             if content in root.mask_preset_names:
+                 inner_idx += 1
+                 savename = str(inner_idx)
+             val_masks[savename] = ImageChops.logical_xor(val_masks[content], val_masks[content_second])
+             return f"{{{savename}}}"
+
+         prev_mask_seq = mask_seq
+         mask_seq = re.sub(pattern, parse, mask_seq)
+         if mask_seq == prev_mask_seq:
+             break
+
+     # Set-difference the regions with '\'
+     while True:
+         pattern = r'{(?P<inner1>[\S\s]*?)}[\s]*\\[\s]*{(?P<inner2>[\S\s]*?)}'
+
+         def parse(match_object):
+             nonlocal inner_idx
+             content = match_object.groupdict()['inner1']
+             content_second = match_object.groupdict()['inner2']
+             savename = content
+             if content in root.mask_preset_names:
+                 inner_idx += 1
+                 savename = str(inner_idx)
+             val_masks[savename] = ImageChops.logical_and(val_masks[content], ImageChops.invert(val_masks[content_second]))
+             return f"{{{savename}}}"
+
+         prev_mask_seq = mask_seq
+         mask_seq = re.sub(pattern, parse, mask_seq)
+         if mask_seq == prev_mask_seq:
+             break
+
+     # Step 4:
+     # Output
+     # Now we should have a single var left to return. If not, raise an error message
+     pattern = r'{(?P<inner>[\S\s]*?)}'
+     matches = re.findall(pattern, mask_seq)
+
+     if len(matches) != 1:
+         raise Exception(f'Wrong composable mask expression format! Broken mask sequence: {mask_seq}')
+
+     return f"{{{matches[0]}}}"
+
+ def compose_mask_with_check(root, args, mask_seq, val_masks, frame_image):
+     for k, v in val_masks.items():
+         val_masks[k] = blank_if_none(v, args.W, args.H, '1').convert('1')
+     return check_mask_for_errors(val_masks[compose_mask(root, args, mask_seq, val_masks, frame_image, 0)[1:-1]].convert('L'))
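For illustration, a hypothetical expression in the grammar described above (the file name is made up; `root`, `args`, `val_masks` and `frame_image` come from the render pipeline, so the call itself is left commented):

    # Everything covered by the video mask OR the word mask <cat>, minus [exclude.png]:
    mask_seq = "({video_mask} | <cat>) \\ [exclude.png]"
    # mask = compose_mask_with_check(root, args, mask_seq, val_masks, frame_image)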
extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/consistency_check.py ADDED
@@ -0,0 +1,148 @@
+ # Copyright (C) 2023 Deforum LLC
+ #
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU Affero General Public License as published by
+ # the Free Software Foundation, version 3 of the License.
+ #
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ # GNU General Public License for more details.
+ #
+ # You should have received a copy of the GNU Affero General Public License
+ # along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+ # Contact the authors: https://deforum.github.io/
+
+ '''
+ Taken from https://github.com/Sxela/flow_tools/blob/main (GNU GPL Licensed), and modified to suit Deforum
+ '''
+ # import argparse
+ # import PIL.Image
+ import numpy as np
+ # import scipy.ndimage
+ # import glob
+ # from tqdm import tqdm
+
+ def make_consistency(flow1, flow2, edges_unreliable=False):
+     # Awesome pythonic consistency check from [maua](https://github.com/maua-maua-maua/maua/blob/44485c745c65cf9d83cb1b1c792a177588e9c9fc/maua/flow/consistency.py) by Hans Brouwer and Henry Rachootin
+     # algorithm based on https://github.com/manuelruder/artistic-videos/blob/master/consistencyChecker/consistencyChecker.cpp
+     # reimplemented in numpy by Hans Brouwer
+     # // consistencyChecker
+     # // Check consistency of forward flow via backward flow.
+     # // (c) Manuel Ruder, Alexey Dosovitskiy, Thomas Brox 2016
+
+     flow1 = np.flip(flow1, axis=2)
+     flow2 = np.flip(flow2, axis=2)
+     h, w, _ = flow1.shape
+
+     # get grid of coordinates for each pixel
+     orig_coord = np.flip(np.mgrid[:w, :h], 0).T
+
+     # find where the flow1 maps each pixel
+     warp_coord = orig_coord + flow1
+
+     # clip the coordinates in bounds and round down
+     warp_coord_inbound = np.zeros_like(warp_coord)
+     warp_coord_inbound[..., 0] = np.clip(warp_coord[..., 0], 0, h - 2)
+     warp_coord_inbound[..., 1] = np.clip(warp_coord[..., 1], 0, w - 2)
+     warp_coord_floor = np.floor(warp_coord_inbound).astype(int)
+
+     # for each pixel: bilinear interpolation of the corresponding flow2 values around the point mapped to by flow1
+     alpha = warp_coord_inbound - warp_coord_floor
+     flow2_00 = flow2[warp_coord_floor[..., 0], warp_coord_floor[..., 1]]
+     flow2_01 = flow2[warp_coord_floor[..., 0], warp_coord_floor[..., 1] + 1]
+     flow2_10 = flow2[warp_coord_floor[..., 0] + 1, warp_coord_floor[..., 1]]
+     flow2_11 = flow2[warp_coord_floor[..., 0] + 1, warp_coord_floor[..., 1] + 1]
+     flow2_0_blend = (1 - alpha[..., 1, None]) * flow2_00 + alpha[..., 1, None] * flow2_01
+     flow2_1_blend = (1 - alpha[..., 1, None]) * flow2_10 + alpha[..., 1, None] * flow2_11
+     warp_coord_flow2 = (1 - alpha[..., 0, None]) * flow2_0_blend + alpha[..., 0, None] * flow2_1_blend
+
+     # coordinates that flow2 remaps each flow1-mapped pixel to
+     rewarp_coord = warp_coord + warp_coord_flow2
+
+     # where the difference in position after flow1 and flow2 are applied is larger than a threshold, there is likely an
+     # occlusion. set values to -0.75 so the final gaussian blur will spread the value a couple pixels around this area
+     squared_diff = np.sum((rewarp_coord - orig_coord) ** 2, axis=2)
+     threshold = 0.01 * np.sum(warp_coord_flow2 ** 2 + flow1 ** 2, axis=2) + 0.5
+
+     reliable_flow = np.ones((squared_diff.shape[0], squared_diff.shape[1], 3))
+     reliable_flow[..., 0] = np.where(squared_diff >= threshold, -0.75, 1)
+
+     # areas mapping outside of the frame are also occluded (don't need extra region around these though, so set 0)
+     if edges_unreliable:
+         reliable_flow[..., 1] = np.where(
+             np.logical_or.reduce(
+                 (
+                     warp_coord[..., 0] < 0,
+                     warp_coord[..., 1] < 0,
+                     warp_coord[..., 0] >= h - 1,
+                     warp_coord[..., 1] >= w - 1,
+                 )
+             ),
+             0,
+             reliable_flow[..., 1],
+         )
+
+     # get derivative of flow, large changes in derivative => edge of moving object
+     dx = np.diff(flow1, axis=1, append=0)
+     dy = np.diff(flow1, axis=0, append=0)
+     motion_edge = np.sum(dx ** 2 + dy ** 2, axis=2)
+     motion_threshold = 0.01 * np.sum(flow1 ** 2, axis=2) + 0.002
+     reliable_flow[..., 2] = np.where(np.logical_and(motion_edge > motion_threshold, reliable_flow[..., 2] != -0.75), 0, reliable_flow[..., 2])
+
+     return reliable_flow
+
+
+ # parser = argparse.ArgumentParser()
+ # parser.add_argument("--flow_fwd", type=str, required=True, help="Forward flow path or glob pattern")
+ # parser.add_argument("--flow_bwd", type=str, required=True, help="Backward flow path or glob pattern")
+ # parser.add_argument("--output", type=str, required=True, help="Output consistency map path")
+ # parser.add_argument("--output_postfix", type=str, default='_cc', help="Output consistency map name postfix")
+ # parser.add_argument("--image_output", action='store_true', help="Output consistency map as b\w image path")
+ # parser.add_argument("--skip_numpy_output", action='store_true', help="Don`t save numpy array")
+ # parser.add_argument("--blur", type=float, default=2., help="Gaussian blur kernel size (0 for no blur)")
+ # parser.add_argument("--bottom_clamp", type=float, default=0., help="Clamp lower values")
+ # parser.add_argument("--edges_reliable", action='store_true', help="Consider edges reliable")
+ # parser.add_argument("--save_separate_channels", action='store_true', help="Save consistency mask layers as separate channels")
+ # args = parser.parse_args()
+
+ # def run(args):
+ #     flow_fwd_many = sorted(glob.glob(args.flow_fwd))
+ #     flow_bwd_many = sorted(glob.glob(args.flow_bwd))
+ #     if len(flow_fwd_many) != len(flow_bwd_many):
+ #         raise Exception('Forward and backward flow file numbers don`t match')
+ #         return
+
+ #     for flow_fwd, flow_bwd in tqdm(zip(flow_fwd_many, flow_bwd_many)):
+ #         flow_fwd = flow_fwd.replace('\\', '/')
+ #         flow_bwd = flow_bwd.replace('\\', '/')
+ #         flow1 = np.load(flow_fwd)
+ #         flow2 = np.load(flow_bwd)
+ #         consistency_map_multilayer = make_consistency(flow1, flow2, edges_unreliable=not args.edges_reliable)
+
+ #         if args.save_separate_channels:
+ #             consistency_map = consistency_map_multilayer
+ #         else:
+ #             consistency_map = np.ones_like(consistency_map_multilayer[..., 0])
+ #             consistency_map *= consistency_map_multilayer[..., 0]
+ #             consistency_map *= consistency_map_multilayer[..., 1]
+ #             consistency_map *= consistency_map_multilayer[..., 2]
+
+ #         # blur
+ #         if args.blur > 0.:
+ #             consistency_map = scipy.ndimage.gaussian_filter(consistency_map, [args.blur, args.blur])
+
+ #         # clip values between bottom_clamp and 1
+ #         bottom_clamp = min(max(args.bottom_clamp, 0.), 0.999)
+ #         consistency_map = consistency_map.clip(bottom_clamp, 1)
+ #         out_fname = args.output + '/' + flow_fwd.split('/')[-1][:-4] + args.output_postfix
+
+ #         if not args.skip_numpy_output:
+ #             np.save(out_fname, consistency_map)
+
+ #         # save as jpeg
+ #         if args.image_output:
+ #             PIL.Image.fromarray((consistency_map * 255.).astype('uint8')).save(out_fname + '.jpg', quality=90)
+
+ # run(args)
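A minimal sanity check of `make_consistency` under an assumed zero-flow input (identical frames are trivially consistent):

    import numpy as np

    h, w = 32, 48
    flow_fwd = np.zeros((h, w, 2), dtype=np.float32)
    flow_bwd = np.zeros((h, w, 2), dtype=np.float32)
    reliable = make_consistency(flow_fwd, flow_bwd, edges_unreliable=False)
    assert reliable.shape == (h, w, 3)          # three reliability channels
    assert np.all(reliable[..., 0] == 1)        # no occlusions when flow is zero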
extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/defaults.py ADDED
@@ -0,0 +1,219 @@
+ # Copyright (C) 2023 Deforum LLC
+ #
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU Affero General Public License as published by
+ # the Free Software Foundation, version 3 of the License.
+ #
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ # GNU General Public License for more details.
+ #
+ # You should have received a copy of the GNU Affero General Public License
+ # along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+ # Contact the authors: https://deforum.github.io/
+
+ def get_samplers_list():
+     return {
+         'euler a': 'Euler a',
+         'euler': 'Euler',
+         'lms': 'LMS',
+         'heun': 'Heun',
+         'dpm2': 'DPM2',
+         'dpm2 a': 'DPM2 a',
+         'dpm++ 2s a': 'DPM++ 2S a',
+         'dpm++ 2m': 'DPM++ 2M',
+         'dpm++ sde': 'DPM++ SDE',
+         'dpm fast': 'DPM fast',
+         'dpm adaptive': 'DPM adaptive',
+         'lms karras': 'LMS Karras',
+         'dpm2 karras': 'DPM2 Karras',
+         'dpm2 a karras': 'DPM2 a Karras',
+         'dpm++ 2s a karras': 'DPM++ 2S a Karras',
+         'dpm++ 2m karras': 'DPM++ 2M Karras',
+         'dpm++ sde karras': 'DPM++ SDE Karras'
+     }
+
+ def DeforumAnimPrompts():
+     return r"""{
+     "0": "tiny cute bunny, vibrant diffraction, highly detailed, intricate, ultra hd, sharp photo, crepuscular rays, in focus",
+     "30": "anthropomorphic clean cat, surrounded by fractals, epic angle and pose, symmetrical, 3d, depth of field",
+     "60": "a beautiful coconut --neg photo, realistic",
+     "90": "a beautiful durian, award winning photography"
+     }
+     """
+
+ # Guided images defaults
+ def get_guided_imgs_default_json():
+     return '''{
+     "0": "https://deforum.github.io/a1/Gi1.png",
+     "max_f/4-5": "https://deforum.github.io/a1/Gi2.png",
+     "max_f/2-10": "https://deforum.github.io/a1/Gi3.png",
+     "3*max_f/4-15": "https://deforum.github.io/a1/Gi4.jpg",
+     "max_f-20": "https://deforum.github.io/a1/Gi1.png"
+     }'''
+
+ def get_hybrid_info_html():
+     return """
+         <p style="padding-bottom:0">
+             <b style="text-shadow: blue -1px -1px;">Hybrid Video Compositing in 2D/3D Mode</b>
+             <span style="color:#DDD;font-size:0.7rem;text-shadow: black -1px -1px;margin-left:10px;">
+                 by <a href="https://github.com/reallybigname">reallybigname</a>
+             </span>
+         </p>
+         <ul style="list-style-type:circle; margin-left:1em; margin-bottom:1em;">
+             <li>Composite video with previous frame init image in <b>2D or 3D animation_mode</b> <i>(not for Video Input mode)</i></li>
+             <li>Uses your <b>Init</b> settings for <b>video_init_path, extract_nth_frame, overwrite_extracted_frames</b></li>
+             <li>In Keyframes tab, you can also set <b>color_coherence</b> = '<b>Video Input</b>'</li>
+             <li><b>color_coherence_video_every_N_frames</b> lets you only match every N frames</li>
+             <li>Color coherence may be used with hybrid composite off, to just use video color.</li>
+             <li>Hybrid motion may be used with hybrid composite off, to just use video motion.</li>
+         </ul>
+         Hybrid Video Schedules
+         <ul style="list-style-type:circle; margin-left:1em; margin-bottom:1em;">
+             <li>The alpha schedule controls overall alpha for video mix, whether using a composite mask or not.</li>
+             <li>The <b>hybrid_comp_mask_blend_alpha_schedule</b> only affects the 'Blend' <b>hybrid_comp_mask_type</b>.</li>
+             <li>Mask contrast schedule is from 0-255. Normal is 1. Affects all masks.</li>
+             <li>Autocontrast low/high cutoff schedules 0-100. Low 0 High 100 is full range. <br>(<i><b>hybrid_comp_mask_auto_contrast</b> must be enabled</i>)</li>
+         </ul>
+         <a style='color:SteelBlue;' target='_blank' href='https://github.com/deforum-art/deforum-for-automatic1111-webui/wiki/Animation-Settings#hybrid-video-mode-for-2d3d-animations'>Click Here</a> for more info/ a Guide.
+     """
+
+ def get_composable_masks_info_html():
+     return """
+         <ul style="list-style-type:circle; margin-left:0.75em; margin-bottom:0.2em">
+             <li>To enable, check use_mask in the Init tab</li>
+             <li>Supports boolean operations: (! - negation, & - and, | - or, ^ - xor, \ - difference, () - nested operations)</li>
+             <li>default variables: in \{\}, like \{init_mask\}, \{video_mask\}, \{everywhere\}</li>
+             <li>masks from files: in [], like [mask1.png]</li>
+             <li>description-based: <i>word masks</i> in &lt;&gt;, like &lt;apple&gt;, &lt;hair&gt;</li>
+         </ul>
+     """
+
+ def get_parseq_info_html():
+     return """
+         <p>Use a <a style='color:SteelBlue;' target='_blank' href='https://sd-parseq.web.app/deforum'>Parseq</a> manifest for your animation (leave blank to ignore).</p>
+         <p style="margin-top:1em; margin-bottom:1em;">
+             Fields managed in your Parseq manifest override the values and schedules set in other parts of this UI. You can select which values to override by using the "Managed Fields" section in Parseq.
+         </p>
+     """
+
+ def get_prompts_info_html():
+     return """
+         <ul style="list-style-type:circle; margin-left:0.75em; margin-bottom:0.2em">
+             <li>Please always keep values in math functions above 0.</li>
+             <li>There is *no* Batch mode like in vanilla deforum. Please use the txt2img tab for that.</li>
+             <li>For negative prompts, please write your positive prompt, then --neg ugly, text, asymmetric, or any other negative tokens of your choice. OR:</li>
+             <li>Use the negative_prompts field to automatically append all words as a negative prompt. *Don't* add --neg in the negative_prompts field!</li>
+             <li>Prompts are stored in JSON format. If you've got an error, check it in a <a style="color:SteelBlue" href="https://odu.github.io/slingjsonlint/">JSON Validator</a></li>
+         </ul>
+     """
+
+ def get_guided_imgs_info_html():
+     return """
+         <p>You can use this as a guided image tool or as a looper depending on your settings in the keyframe images field.
+         Set the keyframes and the images that you want to show up.
+         Note: the number of frames between each keyframe should be greater than the tweening frames.</p>
+
+         <p>Prerequisites and Important Info:</p>
+         <ul style="list-style-type:circle; margin-left:2em; margin-bottom:0em">
+             <li>This mode works ONLY with 2D/3D animation modes. Interpolation and Video Input modes aren't supported.</li>
+             <li>Init tab's strength slider should be greater than 0. Recommended value (.65 - .80).</li>
+             <li>'seed_behavior' will be forcibly set to 'schedule'.</li>
+         </ul>
+
+         <p>Looping recommendations:</p>
+         <ul style="list-style-type:circle; margin-left:2em; margin-bottom:0em">
+             <li>seed_schedule should start and end on the same seed.<br />
+             Example: seed_schedule could use 0:(5), 1:(-1), 219:(-1), 220:(5)</li>
+             <li>The 1st and last keyframe images should match.</li>
+             <li>Set your total number of keyframes to be 21 more than the last inserted keyframe image.<br />
+             Example: Default args should use 221 as the total keyframes.</li>
+             <li>Prompts are stored in JSON format. If you've got an error, check it in the validator,
+             <a style="color:SteelBlue" href="https://odu.github.io/slingjsonlint/">like here</a></li>
+         </ul>
+
+         <p>The Guided images mode exposes the following variables for the prompts and the schedules:</p>
+         <ul style="list-style-type:circle; margin-left:2em; margin-bottom:0em">
+             <li><b>s</b> is the <i>initial</i> seed for the whole video generation.</li>
+             <li><b>max_f</b> is the length of the video, in frames.<br />
+             Example: seed_schedule could use 0:(s), 1:(-1), "max_f-2":(-1), "max_f-1":(s)</li>
+             <li><b>t</b> is the current frame number.<br />
+             Example: strength_schedule could use 0:(0.25 * cos((72 / 60 * 3.141 * (t + 0) / 30))**13 + 0.7) to make alternating changes each 30 frames</li>
+         </ul>
+     """
+
+ def get_main_info_html():
+     return """
+         <p><strong>Made by <a href="https://deforum.github.io">deforum.github.io</a>, port for AUTOMATIC1111's webui maintained by <a href="https://github.com/deforum-art">Deforum LLC.</a></strong></p>
+         <p><a style="color:SteelBlue" href="https://github.com/deforum-art/deforum-for-automatic1111-webui/wiki/FAQ-&-Troubleshooting">FOR HELP CLICK HERE</a></p>
+         <ul style="list-style-type:circle; margin-left:1em">
+             <li>The code for this extension: <a style="color:SteelBlue" href="https://github.com/deforum-art/deforum-for-automatic1111-webui">here</a>.</li>
+             <li>Join the <a style="color:SteelBlue" href="https://discord.gg/deforum">official Deforum Discord</a> to share your creations and suggestions.</li>
+             <li>Official Deforum Wiki: <a style="color:SteelBlue" href="https://github.com/deforum-art/deforum-for-automatic1111-webui/wiki">here</a>.</li>
+             <li>Anime-inclined great guide (by FizzleDorf) with lots of examples: <a style="color:SteelBlue" href="https://rentry.org/AnimAnon-Deforum">here</a>.</li>
+             <li>For advanced keyframing with Math functions, see <a style="color:SteelBlue" href="https://github.com/deforum-art/deforum-for-automatic1111-webui/wiki/Maths-in-Deforum">here</a>.</li>
+             <li>Alternatively, use <a style="color:SteelBlue" href="https://sd-parseq.web.app/deforum">sd-parseq</a> as a UI to define your animation schedules (see the Parseq section in the Init tab).</li>
+             <li><a style="color:SteelBlue" href="https://www.framesync.xyz/">framesync.xyz</a> is also a good option: it builds compact math formulae for Deforum keyframes from various waveforms.</li>
+             <li>Another option is to make keyframes using <a style="color:SteelBlue" href="https://www.chigozie.co.uk/keyframe-string-generator/">interactive splines and Bezier curves</a> (select Disco output format).</li>
+             <li>If you want to use Width/Height which are not multiples of 64, please change noise_type to 'Uniform', in Keyframes --> Noise.</li>
+         </ul>
+         <i>If you liked this extension, please <a style="color:SteelBlue" href="https://github.com/deforum-art/deforum-for-automatic1111-webui">give it a star on GitHub</a>!</i> 😊
+     """
+
+ def get_frame_interpolation_info_html():
+     return """
+         <p>Use <a href="https://github.com/megvii-research/ECCV2022-RIFE">RIFE</a> / <a href="https://film-net.github.io/">FILM</a> Frame Interpolation to smooth out, slow-mo (or both) any video.</p>
+         <p style="margin-top:1em">
+             Supported engines:
+             <ul style="list-style-type:circle; margin-left:1em; margin-bottom:1em">
+                 <li>RIFE v4.6 and FILM.</li>
+             </ul>
+         </p>
+         <p style="margin-top:1em">
+             Important notes:
+             <ul style="list-style-type:circle; margin-left:1em; margin-bottom:1em">
+                 <li>Frame Interpolation will *not* run if any of the following are enabled: 'Store frames in ram' / 'Skip video for run all'.</li>
+                 <li>Audio (if provided) will *not* be transferred to the interpolated video if Slow-Mo is enabled.</li>
+                 <li>'add_soundtrack' and 'soundtrack_path' aren't being honoured in "Interpolate an existing video" mode. The original video's audio will be used instead, with the same slow-mo rules above.</li>
+                 <li>In "Interpolate existing pics" mode, FPS is determined *only* by the output FPS slider. Audio will be added if requested even with slow-mo "enabled", as it does *nothing* in this mode.</li>
+             </ul>
+         </p>
+     """
+
+ def get_frames_to_video_info_html():
+     return """
+         <p style="margin-top:0em">
+             Important Notes:
+             <ul style="list-style-type:circle; margin-left:1em; margin-bottom:0.25em">
+                 <li>Enter a path relative to the webui folder, or a full absolute path, and make sure it ends with something like this: '20230124234916_%09d.png' — just replace 20230124234916 with your batch ID. The %09d is important, don't forget it!</li>
+                 <li>In the filename, '%09d' stands for the 9-digit frame counter. For '20230124234916_000000001.png', use '20230124234916_%09d.png'</li>
+                 <li>For non-Deforum frames, use the correct number of counter digits. For files like 'bunnies-0000.jpg', you'd use 'bunnies-%04d.jpg'</li>
+             </ul>
+     """
+
+ def get_leres_info_html():
+     return 'Note that LeReS has a Non-Commercial <a href="https://github.com/aim-uofa/AdelaiDepth/blob/main/LeReS/LICENSE" target="_blank">license</a>. Use it only for fun/personal use.'
+
+ def get_gradio_html(section_name):
+     if section_name.lower() == 'hybrid_video':
+         return get_hybrid_info_html()
+     elif section_name.lower() == 'composable_masks':
+         return get_composable_masks_info_html()
+     elif section_name.lower() == 'parseq':
+         return get_parseq_info_html()
+     elif section_name.lower() == 'prompts':
+         return get_prompts_info_html()
+     elif section_name.lower() == 'guided_imgs':
+         return get_guided_imgs_info_html()
+     elif section_name.lower() == 'main':
+         return get_main_info_html()
+     elif section_name.lower() == 'frame_interpolation':
+         return get_frame_interpolation_info_html()
+     elif section_name.lower() == 'frames_to_video':
+         return get_frames_to_video_info_html()
+     elif section_name.lower() == 'leres':
+         return get_leres_info_html()
+     else:
+         return ""
+
+ mask_fill_choices = ['fill', 'original', 'latent noise', 'latent nothing']
+
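The default prompt schedule above is a JSON string keyed by frame number; a quick check that it parses:

    import json

    prompts = json.loads(DeforumAnimPrompts())
    assert set(prompts.keys()) == {"0", "30", "60", "90"}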
extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/deforum_controlnet.py ADDED
@@ -0,0 +1,368 @@
+ # Copyright (C) 2023 Deforum LLC
+ #
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU Affero General Public License as published by
+ # the Free Software Foundation, version 3 of the License.
+ #
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ # GNU General Public License for more details.
+ #
+ # You should have received a copy of the GNU Affero General Public License
+ # along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+ # Contact the authors: https://deforum.github.io/
+
+ # This helper script is responsible for ControlNet/Deforum integration
+ # https://github.com/Mikubill/sd-webui-controlnet — controlnet repo
+
+ import os
+ import copy
+ import gradio as gr
+ from PIL import Image
+ import numpy as np
+ import importlib
+ from modules import scripts, shared
+ from .deforum_controlnet_gradio import hide_ui_by_cn_status, hide_file_textboxes, ToolButton
+ from .general_utils import count_files_in_folder, clean_gradio_path_strings, debug_print  # TODO: do it another way
+ from .video_audio_utilities import vid2frames, convert_image
+ from .animation_key_frames import ControlNetKeys
+ from .load_images import load_image
+
+ cnet = None
+ # number of CN model tabs to show in the deforum gui. If the user has set it in the A1111 UI
+ # to a value less than 5, we bump it to 5; otherwise we respect the value they specified
+ max_models = shared.opts.data.get("control_net_unit_count", shared.opts.data.get("control_net_max_models_num", 5))
+ num_of_models = 5 if max_models <= 5 else max_models
+
+ def find_controlnet():
+     global cnet
+     if cnet: return cnet
+     try:
+         cnet = importlib.import_module('extensions.sd-webui-controlnet.scripts.external_code', 'external_code')
+     except Exception:
+         try:
+             cnet = importlib.import_module('extensions-builtin.sd-webui-controlnet.scripts.external_code', 'external_code')
+         except Exception:
+             pass
+     if cnet:
+         print("\033[0;32m*Deforum ControlNet support: enabled*\033[0m")
+         return cnet  # return the module itself so every caller gets a usable handle, even on the first call
+     return None
+
+ def controlnet_infotext():
+     return """<p>Requires the <a style='color:SteelBlue;' target='_blank' href='https://github.com/Mikubill/sd-webui-controlnet'>ControlNet</a> extension to be installed.</p>
+             <p>If Deforum crashes due to CN updates, go <a style='color:Orange;' target='_blank' href='https://github.com/Mikubill/sd-webui-controlnet/issues'>here</a> and report your problem.</p>
+            """
+
+ def is_controlnet_enabled(controlnet_args):
+     for i in range(1, num_of_models + 1):
+         if getattr(controlnet_args, f'cn_{i}_enabled', False):
+             return True
+     return False
+
+ def setup_controlnet_ui_raw():
+     cnet = find_controlnet()
+     cn_models = cnet.get_models()
+     cn_preprocessors = cnet.get_modules()
+
+     cn_modules = cnet.get_modules_detail()
+     preprocessor_sliders_config = {}
+
+     for config_name, config_values in cn_modules.items():
+         sliders = config_values.get('sliders', [])
+         preprocessor_sliders_config[config_name] = sliders
+
+     model_free_preprocessors = ["reference_only", "reference_adain", "reference_adain+attn"]
+     flag_preprocessor_resolution = "Preprocessor Resolution"
+
+     def build_sliders(module, pp):
+         grs = []
+         if module not in preprocessor_sliders_config:
+             grs += [
+                 gr.update(label=flag_preprocessor_resolution, value=512, minimum=64, maximum=2048, step=1, visible=not pp, interactive=not pp),
+                 gr.update(visible=False, interactive=False),
+                 gr.update(visible=False, interactive=False),
+                 gr.update(visible=True)
+             ]
+         else:
+             for slider_config in preprocessor_sliders_config[module]:
+                 if isinstance(slider_config, dict):
+                     visible = True
+                     if slider_config['name'] == flag_preprocessor_resolution:
+                         visible = not pp
+                     grs.append(gr.update(
+                         label=slider_config['name'],
+                         value=slider_config['value'],
+                         minimum=slider_config['min'],
+                         maximum=slider_config['max'],
+                         step=slider_config['step'] if 'step' in slider_config else 1,
+                         visible=visible,
+                         interactive=visible))
+                 else:
+                     grs.append(gr.update(visible=False, interactive=False))
+             while len(grs) < 3:
+                 grs.append(gr.update(visible=False, interactive=False))
+             grs.append(gr.update(visible=True))
+         if module in model_free_preprocessors:
+             grs += [gr.update(visible=False, value='None'), gr.update(visible=False)]
+         else:
+             grs += [gr.update(visible=True), gr.update(visible=True)]
+         return grs
+
+     refresh_symbol = '\U0001f504'  # 🔄
+     switch_values_symbol = '\U000021C5'  # ⇅
+     model_dropdowns = []
+     infotext_fields = []
+
+     def create_model_in_tab_ui(cn_id):
+         with gr.Row():
+             enabled = gr.Checkbox(label="Enable", value=False, interactive=True)
+             pixel_perfect = gr.Checkbox(label="Pixel Perfect", value=False, visible=False, interactive=True)
+             low_vram = gr.Checkbox(label="Low VRAM", value=False, visible=False, interactive=True)
+             overwrite_frames = gr.Checkbox(label='Overwrite input frames', value=True, visible=False, interactive=True)
+         with gr.Row(visible=False) as mod_row:
+             module = gr.Dropdown(cn_preprocessors, label="Preprocessor", value="none", interactive=True)
+             model = gr.Dropdown(cn_models, label="Model", value="None", interactive=True)
+             refresh_models = ToolButton(value=refresh_symbol)
+             refresh_models.click(refresh_all_models, model, model)
+         with gr.Row(visible=False) as weight_row:
+             weight = gr.Textbox(label="Weight schedule", lines=1, value='0:(1)', interactive=True)
+         with gr.Row(visible=False) as start_cs_row:
+             guidance_start = gr.Textbox(label="Starting Control Step schedule", lines=1, value='0:(0.0)', interactive=True)
+         with gr.Row(visible=False) as end_cs_row:
+             guidance_end = gr.Textbox(label="Ending Control Step schedule", lines=1, value='0:(1.0)', interactive=True)
+         model_dropdowns.append(model)
+         with gr.Column(visible=False) as advanced_column:
+             processor_res = gr.Slider(label="Annotator resolution", value=64, minimum=64, maximum=2048, interactive=False)
+             threshold_a = gr.Slider(label="Threshold A", value=64, minimum=64, maximum=1024, interactive=False)
+             threshold_b = gr.Slider(label="Threshold B", value=64, minimum=64, maximum=1024, interactive=False)
+         with gr.Row(visible=False) as vid_path_row:
+             vid_path = gr.Textbox(value='', label="ControlNet Input Video/ Image Path", interactive=True)
+         with gr.Row(visible=False) as mask_vid_path_row:  # invisible temporarily since 26-04-23 until masks are fixed
+             mask_vid_path = gr.Textbox(value='', label="ControlNet Mask Video/ Image Path (*NOT WORKING, kept in UI for CN's devs testing!*)", interactive=True)
+         with gr.Row(visible=False) as control_mode_row:
+             control_mode = gr.Radio(choices=["Balanced", "My prompt is more important", "ControlNet is more important"], value="Balanced", label="Control Mode", interactive=True)
+         with gr.Row(visible=False) as env_row:
+             resize_mode = gr.Radio(choices=["Outer Fit (Shrink to Fit)", "Inner Fit (Scale to Fit)", "Just Resize"], value="Inner Fit (Scale to Fit)", label="Resize Mode", interactive=True)
+         with gr.Row(visible=False) as control_loopback_row:
+             loopback_mode = gr.Checkbox(label="LoopBack mode", value=False, interactive=True)
+         hide_output_list = [pixel_perfect, low_vram, mod_row, module, weight_row, start_cs_row, end_cs_row, env_row, overwrite_frames, vid_path_row, control_mode_row, mask_vid_path_row,
+                             control_loopback_row]  # add mask_vid_path_row when masks are working again
+         for cn_output in hide_output_list:
+             enabled.change(fn=hide_ui_by_cn_status, inputs=enabled, outputs=cn_output)
+         module.change(build_sliders, inputs=[module, pixel_perfect], outputs=[processor_res, threshold_a, threshold_b, advanced_column, model, refresh_models])
+         # hide vid/image input fields
+         loopback_outs = [vid_path_row, mask_vid_path_row]
+         for loopback_output in loopback_outs:
+             loopback_mode.change(fn=hide_file_textboxes, inputs=loopback_mode, outputs=loopback_output)
+         # handle pixel perfect ui changes
+         pixel_perfect.change(build_sliders, inputs=[module, pixel_perfect], outputs=[processor_res, threshold_a, threshold_b, advanced_column, model, refresh_models])
+         infotext_fields.extend([
+             (module, "ControlNet Preprocessor"),
+             (model, "ControlNet Model"),
+             (weight, "ControlNet Weight"),
+         ])
+
+         return {key: value for key, value in locals().items() if key in [
+             "enabled", "pixel_perfect", "low_vram", "module", "model", "weight",
+             "guidance_start", "guidance_end", "processor_res", "threshold_a", "threshold_b", "resize_mode", "control_mode",
+             "overwrite_frames", "vid_path", "mask_vid_path", "loopback_mode"
+         ]}
+
+     def refresh_all_models(*inputs):
+         cn_models = cnet.get_models(update=True)
+         dd = inputs[0]
+         selected = dd if dd in cn_models else "None"
+         return gr.Dropdown.update(value=selected, choices=cn_models)
+
+     with gr.TabItem('ControlNet'):
+         gr.HTML(controlnet_infotext())
+         with gr.Tabs():
+             model_params = {}
+             for i in range(1, num_of_models + 1):
+                 with gr.Tab(f"CN Model {i}"):
+                     model_params[i] = create_model_in_tab_ui(i)
+
+                     for key, value in model_params[i].items():
+                         locals()[f"cn_{i}_{key}"] = value
+
+     return locals()
+
+ def setup_controlnet_ui():
+     if not find_controlnet():
+         gr.HTML("""<a target='_blank' href='https://github.com/Mikubill/sd-webui-controlnet'>ControlNet not found. Please install it :)</a>""", elem_id='controlnet_not_found_html_msg')
+         return {}
+
+     try:
+         return setup_controlnet_ui_raw()
+     except Exception as e:
+         print(f"ControlNet UI setup failed with error: '{e}'!")
+         gr.HTML(f"""
+                 Failed to setup ControlNet UI, check the reason in your commandline log. Please, downgrade your CN extension to <a style='color:Orange;' target='_blank' href='https://github.com/Mikubill/sd-webui-controlnet/archive/c9340671d6d59e5a79fc404f78f747f969f87374.zip'>c9340671d6d59e5a79fc404f78f747f969f87374</a> or report the problem <a style='color:Orange;' target='_blank' href='https://github.com/Mikubill/sd-webui-controlnet/issues'>here</a>.
+                 """, elem_id='controlnet_not_found_html_msg')
+         return {}
+
+ def controlnet_component_names():
+     if not find_controlnet():
+         return []
+
+     return [f'cn_{i}_{component}' for i in range(1, num_of_models + 1) for component in [
+         'overwrite_frames', 'vid_path', 'mask_vid_path', 'enabled',
+         'low_vram', 'pixel_perfect',
+         'module', 'model', 'weight', 'guidance_start', 'guidance_end',
+         'processor_res', 'threshold_a', 'threshold_b', 'resize_mode', 'control_mode', 'loopback_mode'
+     ]]
+
+ def process_with_controlnet(p, args, anim_args, controlnet_args, root, parseq_adapter, is_img2img=True, frame_idx=0):
+     CnSchKeys = ControlNetKeys(anim_args, controlnet_args) if not parseq_adapter.use_parseq else parseq_adapter.cn_keys
+
+     def read_cn_data(cn_idx):
+         cn_mask_np, cn_image_np = None, None
+         # Loopback mode ENABLED:
+         if getattr(controlnet_args, f'cn_{cn_idx}_loopback_mode'):
+             # On very first frame, check if use init is enabled and an init image is provided
+             if frame_idx == 0 and args.use_init and (args.init_image is not None or args.init_image_box is not None):
+                 cn_image_np = load_image(args.init_image, args.init_image_box)
+                 # convert to uint8 for compatibility with CN
+                 cn_image_np = np.array(cn_image_np).astype('uint8')
+             # Not first frame, use previous img (init_sample)
+             elif frame_idx > 0 and root.init_sample:
+                 cn_image_np = np.array(root.init_sample).astype('uint8')
+         else:  # loopback mode is DISABLED
+             cn_inputframes = os.path.join(args.outdir, f'controlnet_{cn_idx}_inputframes')  # set input frames folder path
+             if os.path.exists(cn_inputframes):
+                 if count_files_in_folder(cn_inputframes) == 1:
+                     cn_frame_path = os.path.join(cn_inputframes, "000000000.jpg")
+                     print(f'Reading ControlNet *static* base frame at {cn_frame_path}')
+                 else:
+                     cn_frame_path = os.path.join(cn_inputframes, f"{frame_idx:09}.jpg")
+                     print(f'Reading ControlNet {cn_idx} base frame #{frame_idx} at {cn_frame_path}')
+                 if os.path.exists(cn_frame_path):
+                     cn_image_np = np.array(Image.open(cn_frame_path).convert("RGB")).astype('uint8')
+             cn_maskframes = os.path.join(args.outdir, f'controlnet_{cn_idx}_maskframes')  # set mask frames folder path
+             if os.path.exists(cn_maskframes):
+                 if count_files_in_folder(cn_maskframes) == 1:
+                     cn_mask_frame_path = os.path.join(cn_maskframes, "000000000.jpg")  # static masks live in the mask folder, not the input folder
+                     print(f'Reading ControlNet *static* mask frame at {cn_mask_frame_path}')
+                 else:
+                     cn_mask_frame_path = os.path.join(args.outdir, f'controlnet_{cn_idx}_maskframes', f"{frame_idx:09}.jpg")
+                     print(f'Reading ControlNet {cn_idx} mask frame #{frame_idx} at {cn_mask_frame_path}')
+                 if os.path.exists(cn_mask_frame_path):
+                     cn_mask_np = np.array(Image.open(cn_mask_frame_path).convert("RGB")).astype('uint8')
+
+         return cn_mask_np, cn_image_np
+
+     cnet = find_controlnet()
+     cn_data = [read_cn_data(i) for i in range(1, num_of_models + 1)]
+
+     # Check if any loopback_mode is set to True
+     any_loopback_mode = any(getattr(controlnet_args, f'cn_{i}_loopback_mode') for i in range(1, num_of_models + 1))
+
+     cn_inputframes_list = [os.path.join(args.outdir, f'controlnet_{i}_inputframes') for i in range(1, num_of_models + 1)]
+
+     if not any(os.path.exists(cn_inputframes) for cn_inputframes in cn_inputframes_list) and not any_loopback_mode:
+         print('\033[33mNeither the base nor the masking frames for ControlNet were found. Using the regular pipeline\033[0m')
+
+     # Remove all scripts except controlnet.
+     #
+     # This is required because controlnet's access to p.script_args invokes @script_args.setter,
+     # which triggers *all* alwayson_scripts' setup() functions, with whatever happens to be in script_args.
+     # In the case of seed.py (which we really don't need with deforum), this overwrites our p.seed & co, which we
+     # had carefully prepared previously. So let's remove the scripts to avoid the problem.
+     #
+     # An alternative would be to populate all the args with the correct values
+     # for all scripts, but this seems even more fragile, as it would break
+     # if a1111 adds or removes scripts.
+     #
+     # Note that we must copy scripts.scripts_img2img or scripts.scripts_txt2img before mutating it
+     # because it persists across requests. Shallow-copying is sufficient because we only mutate a top-level
+     # reference (scripts.alwayson_scripts)
+     p.scripts = copy.copy(scripts.scripts_img2img if is_img2img else scripts.scripts_txt2img)
+     controlnet_script = find_controlnet_script(p)
+     p.scripts.alwayson_scripts = [controlnet_script]
+     # Filling the list with None is safe because only the length will be considered,
+     # and all cn args will be replaced.
+     p.script_args_value = [None] * controlnet_script.args_to
+
+     def create_cnu_dict(cn_args, prefix, img_np, mask_np, frame_idx, CnSchKeys):
+         keys = [
+             "enabled", "module", "model", "weight", "resize_mode", "control_mode", "low_vram", "pixel_perfect",
+             "processor_res", "threshold_a", "threshold_b", "guidance_start", "guidance_end"
+         ]
+         cnu = {k: getattr(cn_args, f"{prefix}_{k}") for k in keys}
+         model_num = int(prefix.split('_')[-1])  # Extract model number from prefix (e.g., "cn_1" -> 1)
+         if 1 <= model_num <= num_of_models:
+             # if in loopback mode and no init image (img_np, after processing in this case) is provided, disable the
+             # CN unit for the very first frame. It will be enabled in the next frame automatically
+             if getattr(cn_args, f"cn_{model_num}_loopback_mode") and frame_idx == 0 and img_np is None:
+                 cnu['enabled'] = False
+             cnu['weight'] = getattr(CnSchKeys, f"cn_{model_num}_weight_schedule_series")[frame_idx]
+             cnu['guidance_start'] = getattr(CnSchKeys, f"cn_{model_num}_guidance_start_schedule_series")[frame_idx]
+             cnu['guidance_end'] = getattr(CnSchKeys, f"cn_{model_num}_guidance_end_schedule_series")[frame_idx]
+             if cnu['enabled']:
+                 debug_print(f"ControlNet {model_num}: weight={cnu['weight']}, guidance_start={cnu['guidance_start']}, guidance_end={cnu['guidance_end']}")
+         cnu['image'] = {'image': img_np, 'mask': mask_np} if mask_np is not None else img_np
+
+         return cnu
+
+     masks_np, images_np = zip(*cn_data)
+
+     cn_units = [cnet.ControlNetUnit(**create_cnu_dict(controlnet_args, f"cn_{i + 1}", img_np, mask_np, frame_idx, CnSchKeys))
+                 for i, (img_np, mask_np) in enumerate(zip(images_np, masks_np))]
+
+     cnet.update_cn_script_in_processing(p, cn_units, is_img2img=is_img2img, is_ui=False)
+
+ def find_controlnet_script(p):
+     controlnet_script = next((script for script in p.scripts.alwayson_scripts if script.title().lower() == "controlnet"), None)
+     if not controlnet_script:
+         raise Exception("ControlNet script not found.")
+     return controlnet_script
+
+ def process_controlnet_input_frames(args, anim_args, controlnet_args, video_path, mask_path, outdir_suffix, id):
+     if (video_path or mask_path) and getattr(controlnet_args, f'cn_{id}_enabled'):
+         frame_path = os.path.join(args.outdir, f'controlnet_{id}_{outdir_suffix}')
+         os.makedirs(frame_path, exist_ok=True)
+
+         accepted_image_extensions = ('.jpg', '.jpeg', '.png', '.bmp')
+         if video_path and video_path.lower().endswith(accepted_image_extensions):
+             convert_image(video_path, os.path.join(frame_path, '000000000.jpg'))
+             print(f"Copied CN Model {id}'s single input image to inputframes folder!")
+         elif mask_path and mask_path.lower().endswith(accepted_image_extensions):
+             convert_image(mask_path, os.path.join(frame_path, '000000000.jpg'))
+             print(f"Copied CN Model {id}'s single input image to inputframes *mask* folder!")
+         else:
+             print(f'Unpacking ControlNet {id} {"video mask" if mask_path else "base video"}')
+             print(f"Exporting Video Frames to {frame_path}...")
+             vid2frames(
+                 video_path=video_path or mask_path,
+                 video_in_frame_path=frame_path,
+                 n=1 if anim_args.animation_mode != 'Video Input' else anim_args.extract_nth_frame,
+                 overwrite=getattr(controlnet_args, f'cn_{id}_overwrite_frames'),
+                 extract_from_frame=0 if anim_args.animation_mode != 'Video Input' else anim_args.extract_from_frame,
+                 extract_to_frame=(anim_args.max_frames - 1) if anim_args.animation_mode != 'Video Input' else anim_args.extract_to_frame,
+                 numeric_files_output=True
+             )
+             print(f"Loading {anim_args.max_frames} input frames from {frame_path} and saving video frames to {args.outdir}")
+             print(f'ControlNet {id} {"video mask" if mask_path else "base video"} unpacked!')
+
+ def unpack_controlnet_vids(args, anim_args, controlnet_args):
+     # this func gets called from render.py once for an entire animation run -->
+     # tries to trigger an extraction of CN input frames (regular + masks) from video or image
+     for i in range(1, num_of_models + 1):
+         # LoopBack mode is enabled, no need to extract a video or copy an init image
+         if getattr(controlnet_args, f'cn_{i}_loopback_mode'):
+             print(f"ControlNet #{i} is in LoopBack mode, skipping video/ image extraction stage.")
+             continue
+         vid_path = clean_gradio_path_strings(getattr(controlnet_args, f'cn_{i}_vid_path', None))
+         mask_path = clean_gradio_path_strings(getattr(controlnet_args, f'cn_{i}_mask_vid_path', None))
+
+         if vid_path:  # Process base video, if available
+             process_controlnet_input_frames(args, anim_args, controlnet_args, vid_path, None, 'inputframes', i)
+
+         if mask_path:  # Process mask video, if available
+             process_controlnet_input_frames(args, anim_args, controlnet_args, None, mask_path, 'maskframes', i)
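All per-unit settings follow the flat `cn_<i>_<name>` attribute convention on `controlnet_args`; a hypothetical stand-in object shows how `is_controlnet_enabled` reads it:

    from types import SimpleNamespace

    # Unit 2 enabled, all other units absent; getattr(..., False) treats missing units as disabled.
    controlnet_args = SimpleNamespace(cn_2_enabled=True)
    assert is_controlnet_enabled(controlnet_args)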
extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/deforum_controlnet_gradio.py ADDED
@@ -0,0 +1,50 @@
+ # Copyright (C) 2023 Deforum LLC
+ #
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU Affero General Public License as published by
+ # the Free Software Foundation, version 3 of the License.
+ #
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ # GNU General Public License for more details.
+ #
+ # You should have received a copy of the GNU Affero General Public License
+ # along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+ # Contact the authors: https://deforum.github.io/
+
+ import gradio as gr
+ # print (cnet_1.get_modules())
+
+ # *** TODO: re-enable table printing! disabled only temp! 13-04-23 ***
+ # table = Table(title="ControlNet params", padding=0, box=box.ROUNDED)
+
+ # TODO: auto infer the names and the values for the table
+ # field_names = []
+ # field_names += ["module", "model", "weight", "inv", "guide_start", "guide_end", "guess", "resize", "rgb_bgr", "proc res", "thr a", "thr b"]
+ # for field_name in field_names:
+ #     table.add_column(field_name, justify="center")
+
+ # cn_model_name = str(controlnet_args.cn_1_model)
+
+ # rows = []
+ # rows += [controlnet_args.cn_1_module, cn_model_name[len('control_'):] if 'control_' in cn_model_name else cn_model_name, controlnet_args.cn_1_weight, controlnet_args.cn_1_invert_image, controlnet_args.cn_1_guidance_start, controlnet_args.cn_1_guidance_end, controlnet_args.cn_1_guess_mode, controlnet_args.cn_1_resize_mode, controlnet_args.cn_1_rgbbgr_mode, controlnet_args.cn_1_processor_res, controlnet_args.cn_1_threshold_a, controlnet_args.cn_1_threshold_b]
+ # rows = [str(x) for x in rows]
+
+ # table.add_row(*rows)
+ # console.print(table)
+
+ def hide_ui_by_cn_status(choice):
+     return gr.update(visible=True) if choice else gr.update(visible=False)
+
+ def hide_file_textboxes(choice):
+     return gr.update(visible=False) if choice else gr.update(visible=True)
+
+ class ToolButton(gr.Button, gr.components.FormComponent):
+     """Small button with single emoji as text, fits inside gradio forms"""
+     def __init__(self, **kwargs):
+         super().__init__(variant="tool", **kwargs)
+
+     def get_block_name(self):
+         return "button"
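A short sketch of how these visibility helpers are typically wired into a Gradio UI (the component names here are illustrative, not taken from the extension):

import gradio as gr

with gr.Blocks() as demo:
    cn_enabled = gr.Checkbox(label="Enable ControlNet unit")
    with gr.Column(visible=False) as cn_options:
        gr.Slider(0.0, 2.0, value=1.0, label="Weight")
    # Show the options column only while the unit is enabled.
    cn_enabled.change(fn=hide_ui_by_cn_status, inputs=cn_enabled, outputs=cn_options)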
extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/deforum_tqdm.py ADDED
@@ -0,0 +1,98 @@
+ # Copyright (C) 2023 Deforum LLC
+ #
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU Affero General Public License as published by
+ # the Free Software Foundation, version 3 of the License.
+ #
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ # GNU General Public License for more details.
+ #
+ # You should have received a copy of the GNU Affero General Public License
+ # along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+ # Contact the authors: https://deforum.github.io/
+
+ import os
+ from math import ceil
+ import tqdm
+ from modules.shared import progress_print_out, opts, cmd_opts
+
+ class DeforumTQDM:
+     def __init__(self, args, anim_args, parseq_args, video_args):
+         self._tqdm = None
+         self._args = args
+         self._anim_args = anim_args
+         self._parseq_args = parseq_args
+         self._video_args = video_args
+
+     def reset(self):
+         from .animation_key_frames import DeformAnimKeys
+         from .parseq_adapter import ParseqAdapter
+         deforum_total = 0
+         # FIXME: get only the amount of steps
+         parseq_adapter = ParseqAdapter(self._parseq_args, self._anim_args, self._video_args, None, None, mute=True)
+         keys = DeformAnimKeys(self._anim_args) if not parseq_adapter.use_parseq else parseq_adapter.anim_keys
+
+         start_frame = 0
+         if self._anim_args.resume_from_timestring:
+             for tmp in os.listdir(self._args.outdir):
+                 filename = tmp.split("_")
+                 # don't use saved depth maps to count the number of frames
+                 if self._anim_args.resume_timestring in filename and "depth" not in filename:
+                     start_frame += 1
+             start_frame -= 1
+         using_vid_init = self._anim_args.animation_mode == 'Video Input'
+         turbo_steps = 1 if using_vid_init else int(self._anim_args.diffusion_cadence)
+         if self._anim_args.resume_from_timestring:
+             last_frame = start_frame - 1
+             if turbo_steps > 1:
+                 last_frame -= last_frame % turbo_steps
+             if turbo_steps > 1:
+                 turbo_next_frame_idx = last_frame
+                 turbo_prev_frame_idx = turbo_next_frame_idx
+                 start_frame = last_frame + turbo_steps
+         frame_idx = start_frame
+         had_first = False
+         while frame_idx < self._anim_args.max_frames:
+             strength = keys.strength_schedule_series[frame_idx]
+             if not had_first and self._args.use_init and ((self._args.init_image is not None and self._args.init_image != '') or self._args.init_image_box is not None):
+                 deforum_total += int(ceil(self._args.steps * (1 - strength)))
+                 had_first = True
+             elif not had_first:
+                 deforum_total += self._args.steps
+                 had_first = True
+             else:
+                 deforum_total += int(ceil(self._args.steps * (1 - strength)))
+
+             if turbo_steps > 1:
+                 frame_idx += turbo_steps
+             else:
+                 frame_idx += 1
+
+         self._tqdm = tqdm.tqdm(
+             desc="Deforum progress",
+             total=deforum_total,
+             position=1,
+             file=progress_print_out
+         )
+
+     def update(self):
+         if not opts.multiple_tqdm or cmd_opts.disable_console_progressbars:
+             return
+         if self._tqdm is None:
+             self.reset()
+         self._tqdm.update()
+
+     def updateTotal(self, new_total):
+         if not opts.multiple_tqdm or cmd_opts.disable_console_progressbars:
+             return
+         if self._tqdm is None:
+             self.reset()
+         self._tqdm.total = new_total
+
+     def clear(self):
+         if self._tqdm is not None:
+             self._tqdm.close()
+             self._tqdm = None
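The intended lifecycle, roughly (a sketch: the four arg objects are assumed to have been built by the UI layer, and total_sampler_steps stands in for whatever drives the sampling loop):

progress = DeforumTQDM(args, anim_args, parseq_args, video_args)
progress.reset()                          # precompute the run-wide step total from the schedules
try:
    for _ in range(total_sampler_steps):  # hypothetical driver loop
        progress.update()                 # one tick per diffusion step
finally:
    progress.clear()                      # close the bar on completion or interruption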
extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/deprecation_utils.py ADDED
@@ -0,0 +1,98 @@
+ # Copyright (C) 2023 Deforum LLC
+ #
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU Affero General Public License as published by
+ # the Free Software Foundation, version 3 of the License.
+ #
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ # GNU General Public License for more details.
+ #
+ # You should have received a copy of the GNU Affero General Public License
+ # along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+ # Contact the authors: https://deforum.github.io/
+
+ # This file maps deprecated setting names to their replacements
+ # and prints a message containing the old and the new names
+
+ deprecation_map = {
+     "histogram_matching": None,
+     "flip_2d_perspective": "enable_perspective_flip",
+     "skip_video_for_run_all": "skip_video_creation",
+     "color_coherence": [
+         ("Match Frame 0 HSV", "HSV", False),
+         ("Match Frame 0 LAB", "LAB", False),
+         ("Match Frame 0 RGB", "RGB", False),
+         # ("removed_value", None, True)  # for removed values, if we'll need it in the future
+     ],
+     "hybrid_composite": [
+         (False, "None", False),
+         (True, "Normal", False),
+     ],
+     "optical_flow_redo_generation": [
+         (False, "None", False),
+         (True, "DIS Fine", False),
+     ],
+     "optical_flow_cadence": [
+         (False, "None", False),
+         (True, "DIS Fine", False),
+     ],
+     "cn_1_resize_mode": [
+         ("Envelope (Outer Fit)", "Outer Fit (Shrink to Fit)", False),
+         ("Scale to Fit (Inner Fit)", "Inner Fit (Scale to Fit)", False),
+     ],
+     "cn_2_resize_mode": [
+         ("Envelope (Outer Fit)", "Outer Fit (Shrink to Fit)", False),
+         ("Scale to Fit (Inner Fit)", "Inner Fit (Scale to Fit)", False),
+     ],
+     "cn_3_resize_mode": [
+         ("Envelope (Outer Fit)", "Outer Fit (Shrink to Fit)", False),
+         ("Scale to Fit (Inner Fit)", "Inner Fit (Scale to Fit)", False),
+     ],
+     "use_zoe_depth": ("depth_algorithm", [("True", "Zoe+AdaBins (old)"), ("False", "Midas+AdaBins (old)")]),
+ }
+
+ def dynamic_num_to_schedule_formatter(old_value):
+     return f"0:({old_value})"
+
+ for i in range(1, 6):  # 5 CN models in total
+     deprecation_map[f"cn_{i}_weight"] = dynamic_num_to_schedule_formatter
+     deprecation_map[f"cn_{i}_guidance_start"] = dynamic_num_to_schedule_formatter
+     deprecation_map[f"cn_{i}_guidance_end"] = dynamic_num_to_schedule_formatter
+
+ def handle_deprecated_settings(settings_json):
+     # Set legacy_colormatch mode to True when importing old files, so results are backwards-compatible. Print a message about it too
+     if 'legacy_colormatch' not in settings_json:
+         settings_json['legacy_colormatch'] = True
+         print('\033[33mlegacy_colormatch is missing from the settings file, so we are setting it to *True* for backwards compatibility. You are welcome to test your file with that setting disabled for better color coherency.\033[0m')
+         print("")
+     for setting_name, deprecation_info in deprecation_map.items():
+         if setting_name in settings_json:
+             if deprecation_info is None:
+                 print(f"WARNING: Setting '{setting_name}' has been removed. It will be discarded and the default value used instead!")
+             elif isinstance(deprecation_info, tuple):
+                 new_setting_name, value_map = deprecation_info
+                 old_value = str(settings_json.pop(setting_name))  # convert the boolean value to a string for comparison
+                 new_value = next((v for k, v in value_map if k == old_value), None)
+                 if new_value is not None:
+                     print(f"WARNING: Setting '{setting_name}' has been renamed to '{new_setting_name}' with value '{new_value}'. The saved settings file will reflect the change")
+                     settings_json[new_setting_name] = new_value
+             elif callable(deprecation_info):
+                 old_value = settings_json[setting_name]
+                 if isinstance(old_value, (int, float)):
+                     new_value = deprecation_info(old_value)
+                     print(f"WARNING: Value '{old_value}' for setting '{setting_name}' has been replaced with '{new_value}'. The saved settings file will reflect the change")
+                     settings_json[setting_name] = new_value
+             elif isinstance(deprecation_info, str):
+                 print(f"WARNING: Setting '{setting_name}' has been renamed to '{deprecation_info}'. The saved settings file will reflect the change")
+                 settings_json[deprecation_info] = settings_json.pop(setting_name)
+             elif isinstance(deprecation_info, list):
+                 for old_value, new_value, is_removed in deprecation_info:
+                     if settings_json[setting_name] == old_value:
+                         if is_removed:
+                             print(f"WARNING: Value '{old_value}' for setting '{setting_name}' has been removed. It will be discarded and the default value used instead!")
+                         else:
+                             print(f"WARNING: Value '{old_value}' for setting '{setting_name}' has been replaced with '{new_value}'. The saved settings file will reflect the change")
+                         settings_json[setting_name] = new_value
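For example, loading a legacy settings file might play out like this (values chosen for illustration):

settings = {
    "skip_video_for_run_all": True,          # renamed key
    "color_coherence": "Match Frame 0 LAB",  # renamed value
    "cn_1_weight": 0.85,                     # plain number, becomes a schedule string
}
handle_deprecated_settings(settings)
assert settings["skip_video_creation"] is True
assert settings["color_coherence"] == "LAB"
assert settings["cn_1_weight"] == "0:(0.85)"
assert settings["legacy_colormatch"] is True  # injected for old files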
extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/depth.py ADDED
@@ -0,0 +1,159 @@
+ # Copyright (C) 2023 Deforum LLC
+ #
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU Affero General Public License as published by
+ # the Free Software Foundation, version 3 of the License.
+ #
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ # GNU General Public License for more details.
+ #
+ # You should have received a copy of the GNU Affero General Public License
+ # along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+ # Contact the authors: https://deforum.github.io/
+
+ import gc
+ import cv2
+ import numpy as np
+ import torch
+ from PIL import Image
+ from einops import rearrange, repeat
+ from modules import devices
+ from modules.shared import cmd_opts
+ from .depth_adabins import AdaBinsModel
+ from .depth_leres import LeReSDepth
+ from .depth_midas import MidasDepth
+ from .depth_zoe import ZoeDepth
+ from .general_utils import debug_print
+
+ class DepthModel:
+     _instance = None
+
+     def __new__(cls, *args, **kwargs):
+         keep_in_vram = kwargs.get('keep_in_vram', False)
+         depth_algorithm = kwargs.get('depth_algorithm', 'Midas-3-Hybrid')
+         Width, Height = kwargs.get('Width', 512), kwargs.get('Height', 512)
+         midas_weight = kwargs.get('midas_weight', 0.2)
+         model_switched = cls._instance and cls._instance.depth_algorithm != depth_algorithm
+         resolution_changed = cls._instance and (cls._instance.Width != Width or cls._instance.Height != Height)
+         zoe_algorithm = 'zoe' in depth_algorithm.lower()
+         model_deleted = cls._instance and cls._instance.should_delete
+
+         should_reload = (cls._instance is None or model_deleted or model_switched or (zoe_algorithm and resolution_changed))
+
+         if should_reload:
+             cls._instance = super().__new__(cls)
+             cls._instance._initialize(models_path=args[0], device=args[1], half_precision=not cmd_opts.no_half, keep_in_vram=keep_in_vram, depth_algorithm=depth_algorithm, Width=Width, Height=Height, midas_weight=midas_weight)
+         elif cls._instance.should_delete and keep_in_vram:
+             cls._instance._initialize(models_path=args[0], device=args[1], half_precision=not cmd_opts.no_half, keep_in_vram=keep_in_vram, depth_algorithm=depth_algorithm, Width=Width, Height=Height, midas_weight=midas_weight)
+         cls._instance.should_delete = not keep_in_vram
+         return cls._instance
+
+     def _initialize(self, models_path, device, half_precision=not cmd_opts.no_half, keep_in_vram=False, depth_algorithm='Midas-3-Hybrid', Width=512, Height=512, midas_weight=1.0):
+         self.models_path = models_path
+         self.device = device
+         self.half_precision = half_precision
+         self.keep_in_vram = keep_in_vram
+         self.depth_algorithm = depth_algorithm
+         self.Width, self.Height = Width, Height
+         self.midas_weight = midas_weight
+         self.depth_min, self.depth_max = 1000, -1000
+         self.adabins_helper = None
+         self._initialize_model()
+
+     def _initialize_model(self):
+         depth_algo = self.depth_algorithm.lower()
+         if depth_algo.startswith('zoe'):
+             self.zoe_depth = ZoeDepth(self.Width, self.Height)
+             if depth_algo == 'zoe+adabins (old)':
+                 self.adabins_model = AdaBinsModel(self.models_path, keep_in_vram=self.keep_in_vram)
+                 self.adabins_helper = self.adabins_model.adabins_helper
+         elif depth_algo == 'leres':
+             self.leres_depth = LeReSDepth(width=448, height=448, models_path=self.models_path, checkpoint_name='res101.pth', backbone='resnext101')
+         elif depth_algo == 'adabins':
+             self.adabins_model = AdaBinsModel(self.models_path, keep_in_vram=self.keep_in_vram)
+             self.adabins_helper = self.adabins_model.adabins_helper
+         elif depth_algo.startswith('midas'):
+             self.midas_depth = MidasDepth(self.models_path, self.device, half_precision=self.half_precision, midas_model_type=self.depth_algorithm)
+             if depth_algo == 'midas+adabins (old)':
+                 self.adabins_model = AdaBinsModel(self.models_path, keep_in_vram=self.keep_in_vram)
+                 self.adabins_helper = self.adabins_model.adabins_helper
+         else:
+             raise Exception(f"Unknown depth_algorithm: {self.depth_algorithm}")
+
+     def predict(self, prev_img_cv2, midas_weight, half_precision) -> torch.Tensor:
+
+         img_pil = Image.fromarray(cv2.cvtColor(prev_img_cv2.astype(np.uint8), cv2.COLOR_RGB2BGR))
+
+         if self.depth_algorithm.lower().startswith('zoe'):
+             depth_tensor = self.zoe_depth.predict(img_pil).to(self.device)
+             if self.depth_algorithm.lower() == 'zoe+adabins (old)' and midas_weight < 1.0:
+                 use_adabins, adabins_depth = AdaBinsModel._instance.predict(img_pil, prev_img_cv2)
+                 if use_adabins:  # if there was no error in getting the adabins depth, align zoe with adabins
+                     depth_tensor = self.blend_and_align_with_adabins(depth_tensor, adabins_depth, midas_weight)
+         elif self.depth_algorithm.lower() == 'leres':
+             depth_tensor = self.leres_depth.predict(prev_img_cv2.astype(np.float32) / 255.0)
+         elif self.depth_algorithm.lower() == 'adabins':
+             use_adabins, adabins_depth = AdaBinsModel._instance.predict(img_pil, prev_img_cv2)
+             depth_tensor = torch.tensor(adabins_depth)
+             if use_adabins is False:
+                 raise Exception("Error getting depth from AdaBins")  # TODO: fallback to something else maybe?
+         elif self.depth_algorithm.lower().startswith('midas'):
+             depth_tensor = self.midas_depth.predict(prev_img_cv2, half_precision)
+             if self.depth_algorithm.lower() == 'midas+adabins (old)' and midas_weight < 1.0:
+                 use_adabins, adabins_depth = AdaBinsModel._instance.predict(img_pil, prev_img_cv2)
+                 if use_adabins:  # if there was no error in getting the adabins depth, align midas with adabins
+                     depth_tensor = self.blend_and_align_with_adabins(depth_tensor, adabins_depth, midas_weight)
+         else:  # Unknown!
+             raise Exception(f"Unknown depth_algorithm passed to depth.predict function: {self.depth_algorithm}")
+
+         return depth_tensor
+
+     def blend_and_align_with_adabins(self, depth_tensor, adabins_depth, midas_weight):
+         depth_tensor = torch.subtract(50.0, depth_tensor) / 19.0  # align midas depth with adabins depth. Original alignment code from Disco Diffusion
+         blended_depth_map = (depth_tensor.cpu().numpy() * midas_weight + adabins_depth * (1.0 - midas_weight))
+         depth_tensor = torch.from_numpy(np.expand_dims(blended_depth_map, axis=0)).squeeze().to(self.device)
+         debug_print("Blended Midas Depth with AdaBins Depth")
+         return depth_tensor
+
+     def to(self, device):
+         self.device = device
+         if self.depth_algorithm.lower().startswith('zoe'):
+             self.zoe_depth.zoe.to(device)
+         elif self.depth_algorithm.lower() == 'leres':
+             self.leres_depth.to(device)
+         elif self.depth_algorithm.lower().startswith('midas'):
+             self.midas_depth.to(device)
+         if hasattr(self, 'adabins_model'):
+             self.adabins_model.to(device)
+         gc.collect()
+         torch.cuda.empty_cache()
+
+     def to_image(self, depth: torch.Tensor):
+         depth = depth.cpu().numpy()
+         depth = np.expand_dims(depth, axis=0) if len(depth.shape) == 2 else depth
+         self.depth_min, self.depth_max = min(self.depth_min, depth.min()), max(self.depth_max, depth.max())
+         denom = max(1e-8, self.depth_max - self.depth_min)
+         temp = rearrange((depth - self.depth_min) / denom * 255, 'c h w -> h w c')
+         return Image.fromarray(repeat(temp, 'h w 1 -> h w c', c=3).astype(np.uint8))
+
+     def save(self, filename: str, depth: torch.Tensor):
+         self.to_image(depth).save(filename)
+
+     def delete_model(self):
+         for attr in ['zoe_depth', 'leres_depth']:
+             if hasattr(self, attr):
+                 getattr(self, attr).delete()
+                 delattr(self, attr)
+
+         if hasattr(self, 'midas_depth'):
+             del self.midas_depth
+
+         if hasattr(self, 'adabins_model'):
+             self.adabins_model.delete_model()
+
+         gc.collect()
+         torch.cuda.empty_cache()
+         devices.torch_gc()
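A sketch of typical use, assuming a CUDA device, a models folder and a BGR uint8 frame from the render loop (DepthModel is a singleton, so repeated construction with unchanged parameters returns the cached instance):

import torch

model = DepthModel('models/Deforum', torch.device('cuda'),
                   keep_in_vram=True, depth_algorithm='Midas-3-Hybrid',
                   Width=512, Height=512, midas_weight=0.2)
depth = model.predict(frame_bgr, midas_weight=0.2, half_precision=True)  # frame_bgr: HxWx3 uint8 array
model.save('outputs/depth_000000001.png', depth)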
extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/depth_adabins.py ADDED
@@ -0,0 +1,78 @@
+ # Copyright (C) 2023 Deforum LLC
+ #
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU Affero General Public License as published by
+ # the Free Software Foundation, version 3 of the License.
+ #
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ # GNU General Public License for more details.
+ #
+ # You should have received a copy of the GNU Affero General Public License
+ # along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+ # Contact the authors: https://deforum.github.io/
+
+ import torch
+ import numpy as np
+ from PIL import Image
+ import torchvision.transforms.functional as TF
+ from .general_utils import download_file_with_checksum
+ from infer import InferenceHelper
+
+ class AdaBinsModel:
+     _instance = None
+
+     def __new__(cls, *args, **kwargs):
+         keep_in_vram = kwargs.get('keep_in_vram', False)
+         if cls._instance is None:
+             cls._instance = super().__new__(cls)
+             cls._instance._initialize(*args, keep_in_vram=keep_in_vram)
+         return cls._instance
+
+     def _initialize(self, models_path, keep_in_vram=False):
+         self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+         self.keep_in_vram = keep_in_vram
+         self.adabins_helper = None
+
+         download_file_with_checksum(url='https://github.com/hithereai/deforum-for-automatic1111-webui/releases/download/AdaBins/AdaBins_nyu.pt', expected_checksum='643db9785c663aca72f66739427642726b03acc6c4c1d3755a4587aa2239962746410d63722d87b49fc73581dbc98ed8e3f7e996ff7b9c0d56d0fbc98e23e41a', dest_folder=models_path, dest_filename='AdaBins_nyu.pt')
+
+         self.adabins_helper = InferenceHelper(models_path=models_path, dataset='nyu', device=self.device)
+
+     def predict(self, img_pil, prev_img_cv2):
+         w, h = prev_img_cv2.shape[1], prev_img_cv2.shape[0]
+         adabins_depth = np.array([])
+         use_adabins = True
+         MAX_ADABINS_AREA, MIN_ADABINS_AREA = 500000, 448 * 448
+
+         image_pil_area, resized = w * h, False
+
+         if image_pil_area not in range(MIN_ADABINS_AREA, MAX_ADABINS_AREA + 1):
+             scale = ((MAX_ADABINS_AREA if image_pil_area > MAX_ADABINS_AREA else MIN_ADABINS_AREA) / image_pil_area) ** 0.5
+             depth_input = img_pil.resize((int(w * scale), int(h * scale)), Image.LANCZOS if image_pil_area > MAX_ADABINS_AREA else Image.BICUBIC)
+             print(f"AdaBins depth resized to {depth_input.width}x{depth_input.height}")
+             resized = True
+         else:
+             depth_input = img_pil
+
+         try:
+             with torch.no_grad():
+                 _, adabins_depth = self.adabins_helper.predict_pil(depth_input)
+             if resized:
+                 adabins_depth = TF.resize(torch.from_numpy(adabins_depth), torch.Size([h, w]), interpolation=TF.InterpolationMode.BICUBIC).cpu().numpy()
+             adabins_depth = adabins_depth.squeeze()
+         except Exception as e:
+             print(f"AdaBins exception encountered ({e}). Falling back to pure MiDaS/Zoe (only if running in Legacy Midas/Zoe+AdaBins mode)")
+             use_adabins = False
+         torch.cuda.empty_cache()
+
+         return use_adabins, adabins_depth
+
+     def to(self, device):
+         self.device = device
+         if self.adabins_helper is not None:
+             self.adabins_helper.to(device)
+
+     def delete_model(self):
+         del self.adabins_helper
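The resize rule above keeps the inference area between 448*448 and 500,000 pixels. A quick check of the arithmetic for a 1280x720 frame:

w, h = 1280, 720
area = w * h                            # 921600 > MAX_ADABINS_AREA
scale = (500000 / area) ** 0.5          # ~0.7366
print(int(w * scale), int(h * scale))   # -> 942 530, downscaled before inference
# the resulting depth map is then resized back to 1280x720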
extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/depth_leres.py ADDED
@@ -0,0 +1,71 @@
+ # Copyright (C) 2023 Deforum LLC
+ #
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU Affero General Public License as published by
+ # the Free Software Foundation, version 3 of the License.
+ #
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ # GNU General Public License for more details.
+ #
+ # You should have received a copy of the GNU Affero General Public License
+ # along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+ # Contact the authors: https://deforum.github.io/
+
+ import torch
+ import cv2
+ import os
+ import numpy as np
+ import torchvision.transforms as transforms
+ from .general_utils import download_file_with_checksum
+ from leres.lib.multi_depth_model_woauxi import RelDepthModel
+ from leres.lib.net_tools import load_ckpt
+
+ class LeReSDepth:
+     def __init__(self, width=448, height=448, models_path=None, checkpoint_name='res101.pth', backbone='resnext101'):
+         self.width = width
+         self.height = height
+         self.models_path = models_path
+         self.checkpoint_name = checkpoint_name
+         self.backbone = backbone
+
+         download_file_with_checksum(url='https://cloudstor.aarnet.edu.au/plus/s/lTIJF4vrvHCAI31/download', expected_checksum='7fdc870ae6568cb28d56700d0be8fc45541e09cea7c4f84f01ab47de434cfb7463cacae699ad19fe40ee921849f9760dedf5e0dec04a62db94e169cf203f55b1', dest_folder=models_path, dest_filename=self.checkpoint_name)
+
+         self.depth_model = RelDepthModel(backbone=self.backbone)
+         self.depth_model.eval()
+         self.DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
+         self.depth_model.to(self.DEVICE)
+         load_ckpt(os.path.join(self.models_path, self.checkpoint_name), self.depth_model, None, None)
+
+     @staticmethod
+     def scale_torch(img):
+         if len(img.shape) == 2:
+             img = img[np.newaxis, :, :]
+         if img.shape[2] == 3:
+             transform = transforms.Compose([transforms.ToTensor(),
+                                             transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
+             img = transform(img)
+         else:
+             img = img.astype(np.float32)
+             img = torch.from_numpy(img)
+         return img
+
+     def predict(self, image):
+         resized_image = cv2.resize(image, (self.width, self.height))
+         img_torch = self.scale_torch(resized_image)[None, :, :, :]
+         pred_depth = self.depth_model.inference(img_torch).cpu().numpy().squeeze()
+         pred_depth_ori = cv2.resize(pred_depth, (image.shape[1], image.shape[0]))
+         return torch.from_numpy(pred_depth_ori).unsqueeze(0).to(self.DEVICE)
+
+     def save_raw_depth(self, depth, filepath):
+         depth_normalized = (depth / depth.max() * 60000).astype(np.uint16)
+         cv2.imwrite(filepath, depth_normalized)
+
+     def to(self, device):
+         self.DEVICE = device
+         self.depth_model = self.depth_model.to(device)
+
+     def delete(self):
+         del self.depth_model
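A usage sketch, assuming the LeReS dependencies are importable and using a hypothetical input file name (the 0-1 float input convention matches how depth.py calls predict):

import cv2
import numpy as np

leres = LeReSDepth(models_path='models/Deforum')          # downloads res101.pth on first use
frame = cv2.imread('frame.png')                           # BGR uint8, any resolution
depth = leres.predict(frame.astype(np.float32) / 255.0)   # 1xHxW tensor at the original resolution
leres.save_raw_depth(depth.squeeze().cpu().numpy(), 'frame_depth.png')  # 16-bit PNG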
extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/depth_midas.py ADDED
@@ -0,0 +1,91 @@
+ # Copyright (C) 2023 Deforum LLC
+ #
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU Affero General Public License as published by
+ # the Free Software Foundation, version 3 of the License.
+ #
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ # GNU General Public License for more details.
+ #
+ # You should have received a copy of the GNU Affero General Public License
+ # along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+ # Contact the authors: https://deforum.github.io/
+
+ import os
+ import cv2
+ import torch
+ import numpy as np
+ from .general_utils import download_file_with_checksum
+ from midas.dpt_depth import DPTDepthModel
+ from midas.transforms import Resize, NormalizeImage, PrepareForNet
+ import torchvision.transforms as T
+
+ class MidasDepth:
+     def __init__(self, models_path, device, half_precision=True, midas_model_type='Midas-3-Hybrid'):
+         if midas_model_type.lower() == 'midas-3.1-beitlarge':
+             self.midas_model_filename = 'dpt_beit_large_512.pt'
+             self.midas_model_checksum = '66cbb00ea7bccd6e43d3fd277bd21002d8d8c2c5c487e5fcd1e1d70c691688a19122418b3ddfa94e62ab9f086957aa67bbec39afe2b41c742aaaf0699ee50b33'
+             self.midas_model_url = 'https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_beit_large_512.pt'
+             self.resize_px = 512
+             self.backbone = 'beitl16_512'
+         else:
+             self.midas_model_filename = 'dpt_large-midas-2f21e586.pt'
+             self.midas_model_checksum = 'fcc4829e65d00eeed0a38e9001770676535d2e95c8a16965223aba094936e1316d569563552a852d471f310f83f597e8a238987a26a950d667815e08adaebc06'
+             self.midas_model_url = 'https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt'
+             self.resize_px = 384
+             self.backbone = 'vitl16_384'
+         self.device = device
+         self.normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
+         self.midas_transform = T.Compose([
+             Resize(self.resize_px, self.resize_px, resize_target=None, keep_aspect_ratio=True, ensure_multiple_of=32,
+                    resize_method="minimal", image_interpolation_method=cv2.INTER_CUBIC),
+             self.normalization,
+             PrepareForNet()
+         ])
+
+         download_file_with_checksum(url=self.midas_model_url, expected_checksum=self.midas_model_checksum, dest_folder=models_path, dest_filename=self.midas_model_filename)
+
+         self.load_midas_model(models_path, self.midas_model_filename)
+         if half_precision:
+             self.midas_model = self.midas_model.half()
+
+     def load_midas_model(self, models_path, midas_model_filename):
+         model_file = os.path.join(models_path, midas_model_filename)
+         print(f"Loading MiDaS model from {midas_model_filename}...")
+         self.midas_model = DPTDepthModel(
+             path=model_file,
+             backbone=self.backbone,
+             non_negative=True,
+         )
+         self.midas_model.eval().to(self.device, memory_format=torch.channels_last if self.device == torch.device("cuda") else None)
+
+     def predict(self, prev_img_cv2, half_precision):
+         img_midas = prev_img_cv2.astype(np.float32) / 255.0
+         img_midas_input = self.midas_transform({"image": img_midas})["image"]
+         sample = torch.from_numpy(img_midas_input).float().to(self.device).unsqueeze(0)
+
+         if self.device.type == "cuda" or self.device.type == "mps":
+             sample = sample.to(memory_format=torch.channels_last)
+             if half_precision:
+                 sample = sample.half()
+
+         with torch.no_grad():
+             midas_depth = self.midas_model.forward(sample)
+         midas_depth = torch.nn.functional.interpolate(
+             midas_depth.unsqueeze(1),
+             size=img_midas.shape[:2],
+             mode="bicubic",
+             align_corners=False,
+         ).squeeze().cpu().numpy()
+
+         torch.cuda.empty_cache()
+         depth_tensor = torch.from_numpy(np.expand_dims(midas_depth, axis=0)).squeeze().to(self.device)
+
+         return depth_tensor
+
+     def to(self, device):
+         self.device = device
+         self.midas_model = self.midas_model.to(device, memory_format=torch.channels_last if device == torch.device("cuda") else None)
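A usage sketch (the models folder and input file are illustrative):

import cv2
import torch

midas = MidasDepth('models/Deforum', torch.device('cuda'), half_precision=True)
frame = cv2.imread('frame.png')                     # BGR uint8 frame
depth = midas.predict(frame, half_precision=True)   # HxW depth tensor on the model's device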
extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/depth_zoe.py ADDED
@@ -0,0 +1,46 @@
+ # Copyright (C) 2023 Deforum LLC
+ #
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU Affero General Public License as published by
+ # the Free Software Foundation, version 3 of the License.
+ #
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ # GNU General Public License for more details.
+ #
+ # You should have received a copy of the GNU Affero General Public License
+ # along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+ # Contact the authors: https://deforum.github.io/
+
+ import torch
+ from zoedepth.models.builder import build_model
+ from zoedepth.utils.config import get_config
+
+ class ZoeDepth:
+     def __init__(self, width=512, height=512):
+         conf = get_config("zoedepth_nk", "infer")
+         conf.img_size = [width, height]
+         self.model_zoe = build_model(conf)
+         self.DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
+         self.zoe = self.model_zoe.to(self.DEVICE)
+         self.width = width
+         self.height = height
+
+     def predict(self, image):
+         self.zoe.core.prep.resizer._Resize__width = self.width
+         self.zoe.core.prep.resizer._Resize__height = self.height
+         depth_tensor = self.zoe.infer_pil(image, output_type="tensor")
+         return depth_tensor
+
+     def to(self, device):
+         self.DEVICE = device
+         self.zoe = self.model_zoe.to(device)
+
+     def save_raw_depth(self, depth, filepath):
+         depth.save(filepath, format='PNG', mode='I;16')
+
+     def delete(self):
+         del self.model_zoe
+         del self.zoe
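A usage sketch, assuming the zoedepth package is importable:

from PIL import Image

zoe = ZoeDepth(width=512, height=512)          # builds the zoedepth_nk model at that working size
img = Image.open('frame.png').convert('RGB')   # hypothetical input frame
depth = zoe.predict(img)                       # HxW torch tensor of metric depth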
extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/frame_interpolation.py ADDED
@@ -0,0 +1,240 @@
+ # Copyright (C) 2023 Deforum LLC
+ #
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU Affero General Public License as published by
+ # the Free Software Foundation, version 3 of the License.
+ #
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ # GNU General Public License for more details.
+ #
+ # You should have received a copy of the GNU Affero General Public License
+ # along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+ # Contact the authors: https://deforum.github.io/
+
+ import os
+ from pathlib import Path
+ from rife.inference_video import run_rife_new_video_infer
+ from .video_audio_utilities import get_quick_vid_info, vid2frames, media_file_has_audio, extract_number, ffmpeg_stitch_video
+ from film_interpolation.film_inference import run_film_interp_infer
+ from .general_utils import duplicate_pngs_from_folder, checksum, convert_images_from_list
+ from modules.shared import opts
+
+ DEBUG_MODE = opts.data.get("deforum_debug_mode_enabled", False)
+
+ # gets 'RIFE v4.3', returns: 'RIFE43'
+ def extract_rife_name(string):
+     parts = string.split()
+     if len(parts) != 2 or parts[0] != "RIFE" or (parts[1][0] != "v" or not parts[1][1:].replace('.', '').isdigit()):
+         raise ValueError("Input string should contain exactly two words: the first should be 'RIFE' and the second should be 'v' followed by a version number")
+     return "RIFE" + parts[1][1:].replace('.', '')
+
+ # This function usually gets a filename, and converts it to a legal linux/windows *folder* name
+ def clean_folder_name(string):
+     illegal_chars = "/\\<>:\"|?*.,\" "
+     translation_table = str.maketrans(illegal_chars, "_" * len(illegal_chars))
+     return string.translate(translation_table)
+
+ def set_interp_out_fps(interp_x, slow_x_enabled, slom_x, in_vid_fps):
+     if interp_x == 'Disabled' or in_vid_fps in ('---', None, '', 'None'):
+         return '---'
+
+     fps = float(in_vid_fps) * int(interp_x)
+     # if slom_x != -1:
+     if slow_x_enabled:
+         fps /= int(slom_x)
+     return int(fps) if fps.is_integer() else fps
+
+ # get the uploaded video's frame count and fps, and return 3 values for the gradio UI: in fcount, in fps, out fps (using the set_interp_out_fps function above)
+ def gradio_f_interp_get_fps_and_fcount(vid_path, interp_x, slow_x_enabled, slom_x):
+     if vid_path is None:
+         return '---', '---', '---'
+     fps, fcount, resolution = get_quick_vid_info(vid_path.name)
+     expected_out_fps = set_interp_out_fps(interp_x, slow_x_enabled, slom_x, fps)
+     return (str(round(fps, 2)) if fps is not None else '---', (round(fcount, 2)) if fcount is not None else '---', round(expected_out_fps, 2) if expected_out_fps != '---' else '---')
+
+ # handle the call to interpolate an uploaded video from the gradio button in args.py (the function that calls this one is named 'upload_vid_to_rife')
+ def process_interp_vid_upload_logic(file, engine, x_am, sl_enabled, sl_am, keep_imgs, f_location, f_crf, f_preset, in_vid_fps, f_models_path, vid_file_name):
+
+     print("Got a request to *frame interpolate* an existing video.")
+
+     _, _, resolution = get_quick_vid_info(file.name)
+     folder_name = clean_folder_name(Path(vid_file_name).stem)
+     outdir = opts.outdir_samples or os.path.join(os.getcwd(), 'outputs')
+     outdir_no_tmp = outdir + f'/frame-interpolation/{folder_name}'
+     i = 1
+     while os.path.exists(outdir_no_tmp):
+         outdir_no_tmp = f"{outdir}/frame-interpolation/{folder_name}_{i}"
+         i += 1
+
+     outdir = os.path.join(outdir_no_tmp, 'tmp_input_frames')
+     os.makedirs(outdir, exist_ok=True)
+
+     vid2frames(video_path=file.name, video_in_frame_path=outdir, overwrite=True, extract_from_frame=0, extract_to_frame=-1, numeric_files_output=True, out_img_format='png')
+
+     # check if the uploaded vid has an audio stream. If it doesn't, set the audio param to None so that ffmpeg won't try to add non-existing audio to the final video.
+     audio_file_to_pass = None
+     if media_file_has_audio(file.name, f_location):
+         audio_file_to_pass = file.name
+
+     process_video_interpolation(frame_interpolation_engine=engine, frame_interpolation_x_amount=x_am, frame_interpolation_slow_mo_enabled=sl_enabled, frame_interpolation_slow_mo_amount=sl_am, orig_vid_fps=in_vid_fps, deforum_models_path=f_models_path, real_audio_track=audio_file_to_pass, raw_output_imgs_path=outdir, img_batch_id=None, ffmpeg_location=f_location, ffmpeg_crf=f_crf, ffmpeg_preset=f_preset, keep_interp_imgs=keep_imgs, orig_vid_name=folder_name, resolution=resolution)
+
+ # handle params before talking with the actual interpolation module (rife/film, more to be added)
+ def process_video_interpolation(frame_interpolation_engine, frame_interpolation_x_amount, frame_interpolation_slow_mo_enabled, frame_interpolation_slow_mo_amount, orig_vid_fps, deforum_models_path, real_audio_track, raw_output_imgs_path, img_batch_id, ffmpeg_location, ffmpeg_crf, ffmpeg_preset, keep_interp_imgs, orig_vid_name, resolution, dont_change_fps=False, srt_path=None):
+
+     is_random_pics_run = dont_change_fps
+     fps = float(orig_vid_fps) * (1 if is_random_pics_run else frame_interpolation_x_amount)
+     fps /= int(frame_interpolation_slow_mo_amount) if frame_interpolation_slow_mo_enabled and not is_random_pics_run else 1
+
+     # disable audio-adding by setting real_audio_track to None if slow-mo is enabled
+     if real_audio_track is not None and frame_interpolation_slow_mo_enabled:
+         real_audio_track = None
+
+     # disable subtitles by setting srt_path to None if slow-mo is enabled
+     if srt_path is not None and frame_interpolation_slow_mo_enabled:
+         srt_path = None
+
+     if frame_interpolation_engine == 'None':
+         return
+     elif frame_interpolation_engine.startswith("RIFE"):
+         # make sure interp_x is valid and in range
+         if frame_interpolation_x_amount not in range(2, 11):
+             raise ValueError("frame_interpolation_x_amount must be between 2x and 10x")
+
+         # set UHD to True if the resolution is 2K or higher
+         if resolution:
+             UHD = resolution[0] >= 2048 and resolution[1] >= 2048
+         else:
+             UHD = False
+         # e.g. from 'RIFE v2.3' to 'RIFE23'
+         actual_model_folder_name = extract_rife_name(frame_interpolation_engine)
+
+         # run the actual rife interpolation and video stitching etc - the whole suite
+         return run_rife_new_video_infer(interp_x_amount=frame_interpolation_x_amount, slow_mo_enabled=frame_interpolation_slow_mo_enabled, slow_mo_x_amount=frame_interpolation_slow_mo_amount, model=actual_model_folder_name, fps=fps, deforum_models_path=deforum_models_path, audio_track=real_audio_track, raw_output_imgs_path=raw_output_imgs_path, img_batch_id=img_batch_id, ffmpeg_location=ffmpeg_location, ffmpeg_crf=ffmpeg_crf, ffmpeg_preset=ffmpeg_preset, keep_imgs=keep_interp_imgs, orig_vid_name=orig_vid_name, UHD=UHD, srt_path=srt_path)
+     elif frame_interpolation_engine == 'FILM':
+         return prepare_film_inference(deforum_models_path=deforum_models_path, x_am=frame_interpolation_x_amount, sl_enabled=frame_interpolation_slow_mo_enabled, sl_am=frame_interpolation_slow_mo_amount, keep_imgs=keep_interp_imgs, raw_output_imgs_path=raw_output_imgs_path, img_batch_id=img_batch_id, f_location=ffmpeg_location, f_crf=ffmpeg_crf, f_preset=ffmpeg_preset, fps=fps, audio_track=real_audio_track, orig_vid_name=orig_vid_name, is_random_pics_run=is_random_pics_run, srt_path=srt_path)
+     else:
+         print("Unknown Frame Interpolation engine chosen. Doing nothing.")
+         return None
+
+ def prepare_film_inference(deforum_models_path, x_am, sl_enabled, sl_am, keep_imgs, raw_output_imgs_path, img_batch_id, f_location, f_crf, f_preset, fps, audio_track, orig_vid_name, is_random_pics_run, srt_path=None):
+     import shutil
+
+     parent_folder = os.path.dirname(raw_output_imgs_path)
+     grandparent_folder = os.path.dirname(parent_folder)
+     if orig_vid_name is not None:
+         interp_vid_path = os.path.join(parent_folder, str(orig_vid_name) + '_FILM_x' + str(x_am))
+     else:
+         interp_vid_path = os.path.join(raw_output_imgs_path, str(img_batch_id) + '_FILM_x' + str(x_am))
+
+     film_model_name = 'film_net_fp16.pt'
+     film_model_folder = os.path.join(deforum_models_path, 'film_interpolation')
+     film_model_path = os.path.join(film_model_folder, film_model_name)  # actual full path to the film .pt model file
+     output_interp_imgs_folder = os.path.join(raw_output_imgs_path, 'interpolated_frames_film')
+     # set a custom name depending on whether we interpolate after a run, or interpolate a video (related/unrelated to deforum, we don't know) directly from within the interpolation tab
+     # interpolated_path = os.path.join(args.raw_output_imgs_path, 'interpolated_frames_rife')
+     if orig_vid_name is not None:  # interpolating a video/set of pictures (deforum or unrelated)
+         custom_interp_path = "{}_{}".format(output_interp_imgs_folder, orig_vid_name)
+     else:  # interpolating after a deforum run:
+         custom_interp_path = "{}_{}".format(output_interp_imgs_folder, img_batch_id)
+
+     # interp_vid_path = os.path.join(raw_output_imgs_path, str(img_batch_id) + '_FILM_x' + str(x_am))
+     img_path_for_ffmpeg = os.path.join(custom_interp_path, "frame_%09d.png")
+
+     if sl_enabled:
+         interp_vid_path = interp_vid_path + '_slomo_x' + str(sl_am)
+     interp_vid_path = interp_vid_path + '.mp4'
+
+     # In this folder we temporarily keep the original frames (converted/copied; the img format depends on the scenario)
+     temp_convert_raw_png_path = os.path.join(raw_output_imgs_path, "tmp_film_folder")
+     if is_random_pics_run:  # pass a dummy so it just copies the imgs instead of re-writing them
+         total_frames = duplicate_pngs_from_folder(raw_output_imgs_path, temp_convert_raw_png_path, img_batch_id, 'DUMMY')
+     else:  # re-write pics as png to avert a problem with mixed 24 and 32 bit outputs from the same animation run
+         total_frames = duplicate_pngs_from_folder(raw_output_imgs_path, temp_convert_raw_png_path, img_batch_id, None)
+     check_and_download_film_model('film_net_fp16.pt', film_model_folder)  # TODO: split this part
+
+     # get the number of in-between frames to provide to FILM - mimics how RIFE works, so we should get the same amount of total frames in the end
+     film_in_between_frames_count = calculate_frames_to_add(total_frames, x_am)
+     # Run the actual FILM inference
+     run_film_interp_infer(
+         model_path=film_model_path,
+         input_folder=temp_convert_raw_png_path,
+         save_folder=custom_interp_path,  # output folder is created in the infer part
+         inter_frames=film_in_between_frames_count)
+
+     add_soundtrack = 'None'
+     if audio_track is not None:
+         add_soundtrack = 'File'
+
+     print("*Passing interpolated frames to ffmpeg...*")
+     exception_raised = False
+     try:
+         ffmpeg_stitch_video(ffmpeg_location=f_location, fps=fps, outmp4_path=interp_vid_path, stitch_from_frame=0, stitch_to_frame=999999999, imgs_path=img_path_for_ffmpeg, add_soundtrack=add_soundtrack, audio_path=audio_track, crf=f_crf, preset=f_preset, srt_path=srt_path)
+     except Exception as e:
+         exception_raised = True
+         print(f"An error occurred while stitching the video: {e}")
+
+     if orig_vid_name and (keep_imgs or exception_raised):
+         shutil.move(custom_interp_path, parent_folder)
+     if not keep_imgs and not exception_raised:
+         if fps <= 450:  # keep interp frames automatically if the out_vid fps is above 450
+             shutil.rmtree(custom_interp_path, ignore_errors=True)
+     # delete the duplicated raw non-interpolated frames
+     shutil.rmtree(temp_convert_raw_png_path, ignore_errors=True)
+     # remove the folder with the raw (non-interpolated) vid input frames in case of an input VID and not PNGs
+     if orig_vid_name:
+         shutil.rmtree(raw_output_imgs_path, ignore_errors=True)
+
+     return interp_vid_path
+
+ def check_and_download_film_model(model_name, model_dest_folder):
+     from basicsr.utils.download_util import load_file_from_url
+     if model_name == 'film_net_fp16.pt':
+         model_dest_path = os.path.join(model_dest_folder, model_name)
+         download_url = 'https://github.com/hithereai/frame-interpolation-pytorch/releases/download/film_net_fp16.pt/film_net_fp16.pt'
+         film_model_hash = '0a823815b111488ac2b7dd7fe6acdd25d35a22b703e8253587764cf1ee3f8f93676d24154d9536d2ce5bc3b2f102fb36dfe0ca230dfbe289d5cd7bde5a34ec12'
+     else:  # Unknown FILM model
+         raise Exception("Got a request to download an unknown FILM model. Can't proceed.")
+     if os.path.exists(model_dest_path):
+         return
+     try:
+         os.makedirs(model_dest_folder, exist_ok=True)
+         # download the film model from the url
+         load_file_from_url(download_url, model_dest_folder)
+         # verify checksum
+         if checksum(model_dest_path) != film_model_hash:
+             raise Exception(f"Error while downloading {model_name}. Please download from: {download_url}, and put in: {model_dest_folder}")
+     except Exception as e:
+         raise Exception(f"Error while downloading {model_name}. Please download from: {download_url}, and put in: {model_dest_folder}") from e
+
+ # compute how many in-between frames FILM should add after each picture, given the total frame count and the interp_x factor
+ def calculate_frames_to_add(total_frames, interp_x):
+     frames_to_add = (total_frames * interp_x - total_frames) / (total_frames - 1)
+     return int(round(frames_to_add))
+
+ def process_interp_pics_upload_logic(pic_list, engine, x_am, sl_enabled, sl_am, keep_imgs, f_location, f_crf, f_preset, fps, f_models_path, resolution, add_soundtrack, audio_track):
+     pic_path_list = [pic.name for pic in pic_list]
+     print(f"Got a request to *frame interpolate* a set of {len(pic_list)} images.")
+     folder_name = clean_folder_name(Path(pic_list[0].orig_name).stem)
+     outdir_no_tmp = os.path.join(os.getcwd(), 'outputs', 'frame-interpolation', folder_name)
+     i = 1
+     while os.path.exists(outdir_no_tmp):
+         outdir_no_tmp = os.path.join(os.getcwd(), 'outputs', 'frame-interpolation', folder_name + '_' + str(i))
+         i += 1
+
+     outdir = os.path.join(outdir_no_tmp, 'tmp_input_frames')
+     os.makedirs(outdir, exist_ok=True)
+
+     convert_images_from_list(paths=pic_path_list, output_dir=outdir, format='png')
+
+     audio_file_to_pass = None
+     # todo? add handling of vid input sound? if needed at all...
+     if add_soundtrack == 'File':
+         audio_file_to_pass = audio_track
+     # todo: upgrade the function so it takes a url and checks the audio really exists before passing it on? not crucial, as ffmpeg softly falls back if needed
+     # if media_file_has_audio(audio_track, f_location):
+
+     # pass a param so it won't duplicate the images at all, as we already do it in here?!
+     process_video_interpolation(frame_interpolation_engine=engine, frame_interpolation_x_amount=x_am, frame_interpolation_slow_mo_enabled=sl_enabled, frame_interpolation_slow_mo_amount=sl_am, orig_vid_fps=fps, deforum_models_path=f_models_path, real_audio_track=audio_file_to_pass, raw_output_imgs_path=outdir, img_batch_id=None, ffmpeg_location=f_location, ffmpeg_crf=f_crf, ffmpeg_preset=f_preset, keep_interp_imgs=keep_imgs, orig_vid_name=folder_name, resolution=resolution, dont_change_fps=True)
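A quick sanity check of the in-between-frame arithmetic above: with 100 source frames and a 3x factor, FILM is asked for round((100*3 - 100) / 99) = 2 frames after each picture, which lands close to (but not exactly on) the total RIFE would produce:

print(calculate_frames_to_add(total_frames=100, interp_x=3))  # -> 2
# resulting frame count: 100 + 99 * 2 = 298 (RIFE's 3x would yield exactly 300)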
extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/general_utils.py ADDED
@@ -0,0 +1,144 @@
+ # Copyright (C) 2023 Deforum LLC
+ #
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU Affero General Public License as published by
+ # the Free Software Foundation, version 3 of the License.
+ #
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ # GNU General Public License for more details.
+ #
+ # You should have received a copy of the GNU Affero General Public License
+ # along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+ # Contact the authors: https://deforum.github.io/
+
+ import os
+ import shutil
+ import hashlib
+ from modules.shared import opts
+ from basicsr.utils.download_util import load_file_from_url
+
+ def debug_print(message):
+     DEBUG_MODE = opts.data.get("deforum_debug_mode_enabled", False)
+     if DEBUG_MODE:
+         print(message)
+
+ def checksum(filename, hash_factory=hashlib.blake2b, chunk_num_blocks=128):
+     h = hash_factory()
+     with open(filename, 'rb') as f:
+         while chunk := f.read(chunk_num_blocks * h.block_size):
+             h.update(chunk)
+     return h.hexdigest()
+
+ def get_os():
+     import platform
+     return {"Windows": "Windows", "Linux": "Linux", "Darwin": "Mac"}.get(platform.system(), "Unknown")
+
+ # used in src/rife/inference_video.py and more, soon
+ def duplicate_pngs_from_folder(from_folder, to_folder, img_batch_id, orig_vid_name):
+     import cv2
+     # TODO: don't copy at all if the input is a video (currently it copies, and if the input is a deforum run it also re-encodes, to make sure no errors arise from mixed 24/32 bit depth outputs)
+     temp_convert_raw_png_path = os.path.join(from_folder, to_folder)
+     os.makedirs(temp_convert_raw_png_path, exist_ok=True)
+
+     frames_handled = 0
+     for f in os.listdir(from_folder):
+         if ('png' in f or 'jpg' in f) and '-' not in f and '_depth_' not in f and ((img_batch_id is not None and f.startswith(img_batch_id) or img_batch_id is None)):
+             frames_handled += 1
+             original_img_path = os.path.join(from_folder, f)
+             if orig_vid_name is not None:
+                 shutil.copy(original_img_path, temp_convert_raw_png_path)
+             else:
+                 image = cv2.imread(original_img_path)
+                 new_path = os.path.join(temp_convert_raw_png_path, f)
+                 cv2.imwrite(new_path, image, [cv2.IMWRITE_PNG_COMPRESSION, 0])
+     return frames_handled
+
+ def convert_images_from_list(paths, output_dir, format):
+     import os
+     from PIL import Image
+     # Ensure that the output directory exists
+     os.makedirs(output_dir, exist_ok=True)
+
+     # Loop over all input images
+     for i, path in enumerate(paths):
+         # Open the image
+         with Image.open(path) as img:
+             # Generate the output filename
+             filename = f"{i+1:09d}.{format}"
+             # Save the image to the output directory
+             img.save(os.path.join(output_dir, filename))
+
+ def get_deforum_version():
+     from modules import extensions as mext
+     try:
+         for ext in mext.extensions:
+             if ext.name in ["deforum", "deforum-for-automatic1111-webui", "sd-webui-deforum"] and ext.enabled:
+                 ext.read_info_from_repo()  # need this call to get extension info on ui-launch, not to be removed
+                 return ext.version
+         return "Unknown"
+     except Exception:
+         return "Unknown"
+
+ def custom_placeholder_format(value_dict, placeholder_match):
+     key = placeholder_match.group(1).lower()
+     value = value_dict.get(key, key) or "_"
+     if isinstance(value, dict) and value:
+         first_key = list(value.keys())[0]
+         value = str(value[first_key][0]) if isinstance(value[first_key], list) and value[first_key] else str(value[first_key])
+     return str(value)[:50]
+
+ def test_long_path_support(base_folder_path):
+     long_folder_name = 'A' * 300
+     long_path = os.path.join(base_folder_path, long_folder_name)
+     try:
+         os.makedirs(long_path)
+         shutil.rmtree(long_path)
+         return True
+     except OSError:
+         return False
+
+ def get_max_path_length(base_folder_path):
+     if get_os() == 'Windows':
+         return (32767 if test_long_path_support(base_folder_path) else 260) - len(base_folder_path) - 1
+     return 4096 - len(base_folder_path) - 1
+
+ def substitute_placeholders(template, arg_list, base_folder_path):
+     import re
+     # Find and update timestring values if resume_from_timestring is True
+     resume_from_timestring = next((arg_obj.resume_from_timestring for arg_obj in arg_list if hasattr(arg_obj, 'resume_from_timestring')), False)
+     resume_timestring = next((arg_obj.resume_timestring for arg_obj in arg_list if hasattr(arg_obj, 'resume_timestring')), None)
+
+     if resume_from_timestring and resume_timestring:
+         for arg_obj in arg_list:
+             if hasattr(arg_obj, 'timestring'):
+                 arg_obj.timestring = resume_timestring
+
+     max_length = get_max_path_length(base_folder_path)
+     values = {attr.lower(): getattr(arg_obj, attr)
+               for arg_obj in arg_list
+               for attr in dir(arg_obj) if not callable(getattr(arg_obj, attr)) and not attr.startswith('__')}
+     formatted_string = re.sub(r"{(\w+)}", lambda m: custom_placeholder_format(values, m), template)
+     formatted_string = re.sub(r'[<>:"/\\|?*\s,]', '_', formatted_string)
+     return formatted_string[:max_length]
+
+ def count_files_in_folder(folder_path):
+     import glob
+     file_pattern = folder_path + "/*"
+     file_count = len(glob.glob(file_pattern))
+     return file_count
+
+ def clean_gradio_path_strings(input_str):
+     if isinstance(input_str, str) and input_str.startswith('"') and input_str.endswith('"'):
+         return input_str[1:-1]
+     else:
+         return input_str
+
+ def download_file_with_checksum(url, expected_checksum, dest_folder, dest_filename):
+     expected_full_path = os.path.join(dest_folder, dest_filename)
+     if not os.path.exists(expected_full_path) and not os.path.isdir(expected_full_path):
+         load_file_from_url(url=url, model_dir=dest_folder, file_name=dest_filename, progress=True)
+         if checksum(expected_full_path) != expected_checksum:
+             raise Exception(f"Error while downloading {dest_filename}.\nPlease manually download from: {url}\nAnd place it in: {dest_folder}")
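For instance, a batch-name template might be resolved like this (the arg object and its values are illustrative):

from types import SimpleNamespace

args = SimpleNamespace(seed=12345, sampler='Euler a', timestring='20230101000000')
name = substitute_placeholders("Deforum_{timestring}_{sampler}_{seed}", [args], 'outputs/')
print(name)  # -> Deforum_20230101000000_Euler_a_12345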
extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/generate.py ADDED
@@ -0,0 +1,368 @@
1
+ # Copyright (C) 2023 Deforum LLC
2
+ #
3
+ # This program is free software: you can redistribute it and/or modify
4
+ # it under the terms of the GNU Affero General Public License as published by
5
+ # the Free Software Foundation, version 3 of the License.
6
+ #
7
+ # This program is distributed in the hope that it will be useful,
8
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
9
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10
+ # GNU General Public License for more details.
11
+ #
12
+ # You should have received a copy of the GNU Affero General Public License
13
+ # along with this program. If not, see <https://www.gnu.org/licenses/>.
14
+
15
+ # Contact the authors: https://deforum.github.io/
16
+
17
+ from PIL import Image
18
+ import math
19
+ import json
20
+ import itertools
21
+ import requests
22
+ import numexpr
23
+ from modules import processing, sd_models
24
+ from modules.shared import sd_model, state, cmd_opts
25
+ from .deforum_controlnet import is_controlnet_enabled, process_with_controlnet
26
+ from .prompt import split_weighted_subprompts
27
+ from .load_images import load_img, prepare_mask, check_mask_for_errors
28
+ from .webui_sd_pipeline import get_webui_sd_pipeline
29
+ from .rich import console
30
+ from .defaults import get_samplers_list
31
+ from .prompt import check_is_number
32
+ import cv2
33
+ import numpy as np
34
+ from types import SimpleNamespace
35
+
36
+ from .general_utils import debug_print
37
+
38
+ def load_mask_latent(mask_input, shape):
39
+ # mask_input (str or PIL Image.Image): Path to the mask image or a PIL Image object
40
+ # shape (list-like len(4)): shape of the image to match, usually latent_image.shape
41
+
42
+ if isinstance(mask_input, str): # mask input is probably a file name
43
+ if mask_input.startswith('http://') or mask_input.startswith('https://'):
44
+ mask_image = Image.open(requests.get(mask_input, stream=True).raw).convert('RGBA')
45
+ else:
46
+ mask_image = Image.open(mask_input).convert('RGBA')
47
+ elif isinstance(mask_input, Image.Image):
48
+ mask_image = mask_input
49
+ else:
50
+ raise Exception("mask_input must be a PIL image or a file name")
51
+
52
+ mask_w_h = (shape[-1], shape[-2])
53
+ mask = mask_image.resize(mask_w_h, resample=Image.LANCZOS)
54
+ mask = mask.convert("L")
55
+ return mask
56
+
57
+ def isJson(myjson):
58
+ try:
59
+ json.loads(myjson)
60
+ except ValueError:
61
+ return False
62
+ return True
63
+
64
+ # Local pairwise implementation so we don't require Python 3.10
+ # (itertools.pairwise) for a single function
66
+ def pairwise_repl(iterable):
67
+ a, b = itertools.tee(iterable)
68
+ next(b, None)
69
+ return zip(a, b)
70
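# e.g. list(pairwise_repl([0, 10, 25])) -> [(0, 10), (10, 25)], matching the
# behavior of itertools.pairwise on Python 3.10+.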
+
71
+ def generate(args, keys, anim_args, loop_args, controlnet_args, root, parseq_adapter, frame=0, sampler_name=None):
72
+ if state.interrupted:
73
+ return None
74
+
75
+ if args.reroll_blank_frames == 'ignore':
76
+ return generate_inner(args, keys, anim_args, loop_args, controlnet_args, root, parseq_adapter, frame, sampler_name)
77
+
78
+ image, caught_vae_exception = generate_with_nans_check(args, keys, anim_args, loop_args, controlnet_args, root, parseq_adapter, frame, sampler_name)
79
+
80
+ if caught_vae_exception or not image.getbbox():
81
+ patience = args.reroll_patience
82
+ print("Blank frame detected! If you don't have the NSFW filter enabled, this may be due to a glitch!")
83
+ if args.reroll_blank_frames == 'reroll':
84
+ while caught_vae_exception or not image.getbbox():
85
+ print("Rerolling with +1 seed...")
86
+ args.seed += 1
87
+ image, caught_vae_exception = generate_with_nans_check(args, keys, anim_args, loop_args, controlnet_args, root, parseq_adapter, frame, sampler_name)
88
+ patience -= 1
89
+ if patience == 0:
90
+ print("Rerolling with +1 seed failed for 10 iterations! Try setting webui's precision to 'full' and if it fails, please report this to the devs! Interrupting...")
91
+ state.interrupted = True
92
+ state.assign_current_image(image)
93
+ return None
94
+ elif args.reroll_blank_frames == 'interrupt':
95
+ print("Interrupting to save your eyes...")
96
+ state.interrupted = True
97
+ state.assign_current_image(image)
98
+ return None
99
+ return image
100
+
101
+ def generate_with_nans_check(args, keys, anim_args, loop_args, controlnet_args, root, parseq_adapter, frame=0, sampler_name=None):
102
+ if cmd_opts.disable_nan_check:
103
+ image = generate_inner(args, keys, anim_args, loop_args, controlnet_args, root, parseq_adapter, frame, sampler_name)
104
+ else:
105
+ try:
106
+ image = generate_inner(args, keys, anim_args, loop_args, controlnet_args, root, parseq_adapter, frame, sampler_name)
107
+ except Exception as e:
108
+ if "A tensor with all NaNs was produced in VAE." in repr(e):
109
+ print(e)
110
+ return None, True
111
+ else:
112
+ raise e
113
+ return image, False
114
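# Editorial note on the flow above: generate() delegates to
# generate_with_nans_check(), which turns the webui's "tensor with all NaNs"
# VAE exception into a (None, True) result so the caller can reroll the seed
# (reroll_blank_frames='reroll') instead of aborting the whole animation.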
+
115
+ def generate_inner(args, keys, anim_args, loop_args, controlnet_args, root, parseq_adapter, frame=0, sampler_name=None):
116
+ # Setup the pipeline
117
+ p = get_webui_sd_pipeline(args, root)
118
+ p.prompt, p.negative_prompt = split_weighted_subprompts(args.prompt, frame, anim_args.max_frames)
119
+
120
+ if not args.use_init and args.strength > 0 and args.strength_0_no_init:
121
+ args.strength = 0
122
+ processed = None
123
+ mask_image = None
124
+ init_image = None
125
+ image_init0 = None
126
+ image_init0_box = None
127
+
128
+ if loop_args.use_looper and anim_args.animation_mode in ['2D', '3D']:
129
+
130
+ debug_print(f"Looper: use_looper={loop_args.use_looper}, imageStrength={loop_args.imageStrength}, blendFactorMax={loop_args.blendFactorMax}, blendFactorSlope={loop_args.blendFactorSlope}, tweeningFrames={loop_args.tweeningFrameSchedule}, colorCorrectionFactor={loop_args.colorCorrectionFactor}")
131
+ args.strength = loop_args.imageStrength
132
+ tweeningFrames = loop_args.tweeningFrameSchedule
133
+ blendFactor = .07
134
+ colorCorrectionFactor = loop_args.colorCorrectionFactor
135
+ jsonImages = json.loads(loop_args.imagesToKeyframe)
136
+ # find which image to show
137
+ parsedImages = {}
138
+ frameToChoose = 0
139
+ max_f = anim_args.max_frames - 1
140
+
141
+ for key, value in jsonImages.items():
142
+ if check_is_number(key): # default case 0:(1 + t %5), 30:(5-t%2)
143
+ parsedImages[key] = value
144
+ else: # math on the left hand side case 0:(1 + t %5), maxKeyframes/2:(5-t%2)
145
+ parsedImages[int(numexpr.evaluate(key))] = value
146
+
147
+ framesToImageSwapOn = list(map(int, list(parsedImages.keys())))
148
+
149
+ for swappingFrame in framesToImageSwapOn[1:]:
150
+ frameToChoose += (frame >= int(swappingFrame))
151
+
152
+ # find which frame to do our swapping on for tweening
153
+ skipFrame = 25
154
+ for fs, fe in pairwise_repl(framesToImageSwapOn):
155
+ if fs <= frame <= fe:
156
+ skipFrame = fe - fs
157
+
158
+ if frame % skipFrame <= tweeningFrames: # number of tweening frames
159
+ blendFactor = loop_args.blendFactorMax - loop_args.blendFactorSlope * math.cos((frame % tweeningFrames) / (tweeningFrames / 2))
160
+ init_image2, _ = load_img(list(jsonImages.values())[frameToChoose],
161
+ None, # init_image_box not used in this case
162
+ shape=(args.W, args.H),
163
+ use_alpha_as_mask=args.use_alpha_as_mask)
164
+ image_init0 = list(jsonImages.values())[0]
165
+
166
+ else: # they passed in a single init image
167
+ image_init0 = args.init_image
168
+ image_init0_box = args.init_image_box
169
+
170
+ available_samplers = get_samplers_list()
171
+ if sampler_name is not None:
172
+ if sampler_name in available_samplers.keys():
173
+ p.sampler_name = available_samplers[sampler_name]
174
+ else:
175
+ raise RuntimeError(f"Sampler name '{sampler_name}' is invalid. Please check the available sampler list in the 'Run' tab")
176
+
177
+ if args.checkpoint is not None:
178
+ info = sd_models.get_closet_checkpoint_match(args.checkpoint)
179
+ if info is None:
180
+ raise RuntimeError(f"Unknown checkpoint: {args.checkpoint}")
181
+ sd_models.reload_model_weights(info=info)
182
+
183
+ if root.init_sample is not None:
184
+ # TODO: cleanup init_sample remains later
185
+ img = root.init_sample
186
+ init_image = img
187
+ if loop_args.use_looper and isJson(loop_args.imagesToKeyframe) and anim_args.animation_mode in ['2D', '3D']:
188
+ init_image = Image.blend(init_image, init_image2, blendFactor)
189
+ correction_colors = Image.blend(init_image, init_image2, colorCorrectionFactor)
190
+ p.color_corrections = [processing.setup_color_correction(correction_colors)]
191
+
192
+ # this is the first pass
193
+ elif (loop_args.use_looper and anim_args.animation_mode in ['2D', '3D']) or (args.use_init and ((args.init_image is not None and args.init_image != '') or args.init_image_box is not None)):
194
+ init_image, mask_image = load_img(image_init0, # initial init image
195
+ image_init0_box, # initial init image from box (if single init image is used, not json list)
196
+ shape=(args.W, args.H),
197
+ use_alpha_as_mask=args.use_alpha_as_mask)
198
+
199
+ else:
200
+
201
+ if anim_args.animation_mode != 'Interpolation':
202
+ print(f"Not using an init image (doing pure txt2img)")
203
+
204
+ if args.motion_preview_mode:
205
+ state.assign_current_image(root.default_img)
206
+ processed = SimpleNamespace(images = [root.default_img], info = "Generating motion preview...")
207
+ else:
208
+ p_txt = processing.StableDiffusionProcessingTxt2Img(
209
+ sd_model=sd_model,
210
+ outpath_samples=root.tmp_deforum_run_duplicated_folder,
211
+ outpath_grids=root.tmp_deforum_run_duplicated_folder,
212
+ prompt=p.prompt,
213
+ styles=p.styles,
214
+ negative_prompt=p.negative_prompt,
215
+ seed=p.seed,
216
+ subseed=p.subseed,
217
+ subseed_strength=p.subseed_strength,
218
+ seed_resize_from_h=p.seed_resize_from_h,
219
+ seed_resize_from_w=p.seed_resize_from_w,
220
+ sampler_name=p.sampler_name,
221
+ batch_size=p.batch_size,
222
+ n_iter=p.n_iter,
223
+ steps=p.steps,
224
+ cfg_scale=p.cfg_scale,
225
+ width=p.width,
226
+ height=p.height,
227
+ restore_faces=p.restore_faces,
228
+ tiling=p.tiling,
229
+ enable_hr=False,
230
+ denoising_strength=0,
231
+ )
232
+
233
+ print_combined_table(args, anim_args, p_txt, keys, frame) # print dynamic table to cli
234
+
235
+ if is_controlnet_enabled(controlnet_args):
236
+ process_with_controlnet(p_txt, args, anim_args, controlnet_args, root, parseq_adapter, is_img2img=False, frame_idx=frame)
237
+
238
+ processed = processing.process_images(p_txt)
239
+
240
+ try:
241
+ p_txt.close()
242
+ except Exception as e:
243
+ ...
244
+
245
+ if processed is None:
246
+ # Mask functions
247
+ if args.use_mask:
248
+ mask_image = args.mask_image
249
+ mask = prepare_mask(args.mask_file if mask_image is None else mask_image,
250
+ (args.W, args.H),
251
+ args.mask_contrast_adjust,
252
+ args.mask_brightness_adjust)
253
+ p.inpainting_mask_invert = args.invert_mask
254
+ p.inpainting_fill = args.fill
255
+ p.inpaint_full_res = args.full_res_mask
256
+ p.inpaint_full_res_padding = args.full_res_mask_padding
257
+ # prevent loaded mask from throwing errors in Image operations if completely black and crop and resize in webui pipeline
258
+ # doing this after contrast and brightness adjustments to ensure that mask is not passed as black or blank
259
+ mask = check_mask_for_errors(mask, args.invert_mask)
260
+ root.noise_mask = mask
261
+ else:
262
+ mask = None
263
+
264
+ assert not ((mask is not None and args.use_mask and args.overlay_mask) and (
265
+ root.init_sample is None and init_image is None)), "Need an init image when use_mask == True and overlay_mask == True"
266
+
267
+ p.init_images = [init_image]
268
+ p.image_mask = mask
269
+ p.image_cfg_scale = args.pix2pix_img_cfg_scale
270
+
271
+ print_combined_table(args, anim_args, p, keys, frame) # print dynamic table to cli
272
+
273
+ if args.motion_preview_mode:
274
+ processed = mock_process_images(args, p, init_image)
275
+ else:
276
+ if is_controlnet_enabled(controlnet_args):
277
+ process_with_controlnet(p, args, anim_args, controlnet_args, root, parseq_adapter, is_img2img=True, frame_idx=frame)
278
+
279
+ processed = processing.process_images(p)
280
+
281
+
282
+ if root.initial_info is None:
283
+ root.initial_info = processed.info
284
+
285
+ if root.first_frame is None:
286
+ root.first_frame = processed.images[0]
287
+
288
+ results = processed.images[0]
289
+
290
+ return results
291
+
292
+ # Run this instead of actual diffusion when doing motion preview.
293
+ def mock_process_images(args, p, init_image):
294
+
295
+ input_image = cv2.cvtColor(np.array(init_image), cv2.COLOR_RGB2BGR)
296
+
297
+ start_point = (int(args.W/3), int(args.H/3)) # cv2 points are (x, y), i.e. (width, height)
+ end_point = (int(args.W-args.W/3), int(args.H-args.H/3))
299
+ color = (255, 255, 255, float(p.denoising_strength))
300
+ thickness = 2
301
+ mock_generated_image = np.zeros_like(input_image, np.uint8)
302
+ cv2.rectangle(mock_generated_image, start_point, end_point, color, thickness)
303
+
304
+
305
+ blend = cv2.addWeighted(input_image, float(1.0-p.denoising_strength), mock_generated_image, float(p.denoising_strength), 0)
306
+
307
+ image = Image.fromarray(cv2.cvtColor(blend, cv2.COLOR_BGR2RGB))
308
+ state.assign_current_image(image)
309
+ return SimpleNamespace(images = [image], info = "Generating motion preview...")
310
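# Editorial note: the preview is a plain alpha blend,
# out = (1 - d) * init_image + d * rectangle_overlay with d = p.denoising_strength,
# so a higher denoising strength makes the mock rectangle more prominent.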
+
311
+ def print_combined_table(args, anim_args, p, keys, frame_idx):
312
+ from rich.table import Table
313
+ from rich import box
314
+
315
+ table = Table(padding=0, box=box.ROUNDED)
316
+
317
+ field_names1 = ["Steps", "CFG"]
318
+ if anim_args.animation_mode != 'Interpolation':
319
+ field_names1.append("Denoise")
320
+ field_names1 += ["Subseed", "Subs. str"] * (anim_args.enable_subseed_scheduling)
321
+ field_names1 += ["Sampler"] * anim_args.enable_sampler_scheduling
322
+ field_names1 += ["Checkpoint"] * anim_args.enable_checkpoint_scheduling
323
+
324
+ for field_name in field_names1:
325
+ table.add_column(field_name, justify="center")
326
+
327
+ rows1 = [str(p.steps), str(p.cfg_scale)]
328
+ if anim_args.animation_mode != 'Interpolation':
329
+ rows1.append(f"{p.denoising_strength:.5g}" if p.denoising_strength is not None else "None")
330
+
331
+ rows1 += [str(p.subseed), f"{p.subseed_strength:.5g}"] * anim_args.enable_subseed_scheduling
332
+ rows1 += [p.sampler_name] * anim_args.enable_sampler_scheduling
333
+ rows1 += [str(args.checkpoint)] * anim_args.enable_checkpoint_scheduling
334
+
335
+ rows2 = []
336
+ if anim_args.animation_mode not in ['Video Input', 'Interpolation']:
337
+ if anim_args.animation_mode == '2D':
338
+ field_names2 = ["Angle", "Zoom", "Tr C X", "Tr C Y"]
339
+ else:
340
+ field_names2 = []
341
+ field_names2 += ["Tr X", "Tr Y"]
342
+ if anim_args.animation_mode == '3D':
343
+ field_names2 += ["Tr Z", "Ro X", "Ro Y", "Ro Z"]
344
+ if anim_args.aspect_ratio_schedule.replace(" ", "") != '0:(1)':
345
+ field_names2 += ["Asp. Ratio"]
346
+ if anim_args.enable_perspective_flip:
347
+ field_names2 += ["Pf T", "Pf P", "Pf G", "Pf F"]
348
+
349
+ for field_name in field_names2:
350
+ table.add_column(field_name, justify="center")
351
+
352
+ if anim_args.animation_mode == '2D':
353
+ rows2 += [f"{keys.angle_series[frame_idx]:.5g}", f"{keys.zoom_series[frame_idx]:.5g}",
354
+ f"{keys.transform_center_x_series[frame_idx]:.5g}", f"{keys.transform_center_y_series[frame_idx]:.5g}"]
355
+
356
+ rows2 += [f"{keys.translation_x_series[frame_idx]:.5g}", f"{keys.translation_y_series[frame_idx]:.5g}"]
357
+
358
+ if anim_args.animation_mode == '3D':
359
+ rows2 += [f"{keys.translation_z_series[frame_idx]:.5g}", f"{keys.rotation_3d_x_series[frame_idx]:.5g}",
360
+ f"{keys.rotation_3d_y_series[frame_idx]:.5g}", f"{keys.rotation_3d_z_series[frame_idx]:.5g}"]
361
+ if anim_args.aspect_ratio_schedule.replace(" ", "") != '0:(1)':
362
+ rows2 += [f"{keys.aspect_ratio_series[frame_idx]:.5g}"]
363
+ if anim_args.enable_perspective_flip:
364
+ rows2 += [f"{keys.perspective_flip_theta_series[frame_idx]:.5g}", f"{keys.perspective_flip_phi_series[frame_idx]:.5g}",
365
+ f"{keys.perspective_flip_gamma_series[frame_idx]:.5g}", f"{keys.perspective_flip_fv_series[frame_idx]:.5g}"]
366
+
367
+ table.add_row(*rows1, *rows2)
368
+ console.print(table)
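# Standalone sketch of the rich table pattern used by print_combined_table
# (illustrative only; the real code adds columns conditionally per schedule):

from rich.table import Table
from rich import box
from rich.console import Console

t = Table(padding=0, box=box.ROUNDED)
for name in ("Steps", "CFG", "Denoise"):
    t.add_column(name, justify="center")
t.add_row("25", "7.5", "0.65")
Console().print(t)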
extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/gradio_funcs.py ADDED
@@ -0,0 +1,296 @@
1
+ # Copyright (C) 2023 Deforum LLC
2
+ #
3
+ # This program is free software: you can redistribute it and/or modify
4
+ # it under the terms of the GNU Affero General Public License as published by
5
+ # the Free Software Foundation, version 3 of the License.
6
+ #
7
+ # This program is distributed in the hope that it will be useful,
8
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
9
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10
+ # GNU General Public License for more details.
11
+ #
12
+ # You should have received a copy of the GNU Affero General Public License
13
+ # along with this program. If not, see <https://www.gnu.org/licenses/>.
14
+
15
+ # Contact the authors: https://deforum.github.io/
16
+
17
+ import gradio as gr
18
+ import modules.paths as ph
19
+ from .general_utils import get_os
20
+ from .upscaling import process_ncnn_upscale_vid_upload_logic
21
+ from .video_audio_utilities import extract_number, get_quick_vid_info, get_ffmpeg_params
22
+ from .frame_interpolation import process_interp_vid_upload_logic, process_interp_pics_upload_logic, gradio_f_interp_get_fps_and_fcount
23
+ from .vid2depth import process_depth_vid_upload_logic
24
+
25
+ f_models_path = ph.models_path + '/Deforum'
26
+
27
+ def handle_change_functions(l_vars):
28
+ l_vars['override_settings_with_file'].change(fn=hide_if_false, inputs=l_vars['override_settings_with_file'], outputs=l_vars['custom_settings_file'])
29
+ l_vars['sampler'].change(fn=show_when_ddim, inputs=l_vars['sampler'], outputs=l_vars['enable_ddim_eta_scheduling'])
30
+ l_vars['sampler'].change(fn=show_when_ancestral_samplers, inputs=l_vars['sampler'], outputs=l_vars['enable_ancestral_eta_scheduling'])
31
+ l_vars['enable_ancestral_eta_scheduling'].change(fn=hide_if_false, inputs=l_vars['enable_ancestral_eta_scheduling'], outputs=l_vars['ancestral_eta_schedule'])
32
+ l_vars['enable_ddim_eta_scheduling'].change(fn=hide_if_false, inputs=l_vars['enable_ddim_eta_scheduling'], outputs=l_vars['ddim_eta_schedule'])
33
+ l_vars['animation_mode'].change(fn=change_max_frames_visibility, inputs=l_vars['animation_mode'], outputs=l_vars['max_frames'])
34
+ diffusion_cadence_outputs = [l_vars['diffusion_cadence'], l_vars['guided_images_accord'], l_vars['optical_flow_cadence_row'], l_vars['cadence_flow_factor_schedule'],
35
+ l_vars['optical_flow_redo_generation'], l_vars['redo_flow_factor_schedule'], l_vars['diffusion_redo']]
36
+ for output in diffusion_cadence_outputs:
37
+ l_vars['animation_mode'].change(fn=change_diffusion_cadence_visibility, inputs=l_vars['animation_mode'], outputs=output)
38
+ three_d_related_outputs = [l_vars['only_3d_motion_column'], l_vars['depth_warp_row_1'], l_vars['depth_warp_row_2'], l_vars['depth_warp_row_3'], l_vars['depth_warp_row_4'],
39
+ l_vars['depth_warp_row_5'], l_vars['depth_warp_row_6'], l_vars['depth_warp_row_7']]
40
+ for output in three_d_related_outputs:
41
+ l_vars['animation_mode'].change(fn=disble_3d_related_stuff, inputs=l_vars['animation_mode'], outputs=output)
42
+ pers_flip_outputs = [l_vars['per_f_th_row'], l_vars['per_f_ph_row'], l_vars['per_f_ga_row'], l_vars['per_f_f_row']]
43
+ for output in pers_flip_outputs:
44
+ l_vars['enable_perspective_flip'].change(fn=hide_if_false, inputs=l_vars['enable_perspective_flip'], outputs=output)
45
+ l_vars['animation_mode'].change(fn=per_flip_handle, inputs=[l_vars['animation_mode'], l_vars['enable_perspective_flip']], outputs=output)
46
+ l_vars['animation_mode'].change(fn=only_show_in_non_3d_mode, inputs=l_vars['animation_mode'], outputs=l_vars['depth_warp_msg_html'])
47
+ l_vars['animation_mode'].change(fn=enable_2d_related_stuff, inputs=l_vars['animation_mode'], outputs=l_vars['only_2d_motion_column'])
48
+ l_vars['animation_mode'].change(fn=disable_by_interpolation, inputs=l_vars['animation_mode'], outputs=l_vars['color_force_grayscale'])
49
+ l_vars['animation_mode'].change(fn=disable_by_interpolation, inputs=l_vars['animation_mode'], outputs=l_vars['noise_tab_column'])
50
+ l_vars['animation_mode'].change(fn=disable_pers_flip_accord, inputs=l_vars['animation_mode'], outputs=l_vars['enable_per_f_row'])
51
+ l_vars['animation_mode'].change(fn=disable_pers_flip_accord, inputs=l_vars['animation_mode'], outputs=l_vars['both_anim_mode_motion_params_column'])
52
+ l_vars['aspect_ratio_use_old_formula'].change(fn=hide_if_true, inputs=l_vars['aspect_ratio_use_old_formula'], outputs=l_vars['aspect_ratio_schedule'])
53
+ l_vars['animation_mode'].change(fn=show_hybrid_html_msg, inputs=l_vars['animation_mode'], outputs=l_vars['hybrid_msg_html'])
54
+ l_vars['animation_mode'].change(fn=change_hybrid_tab_status, inputs=l_vars['animation_mode'], outputs=l_vars['hybrid_sch_accord'])
55
+ l_vars['animation_mode'].change(fn=change_hybrid_tab_status, inputs=l_vars['animation_mode'], outputs=l_vars['hybrid_settings_accord'])
56
+ l_vars['animation_mode'].change(fn=change_hybrid_tab_status, inputs=l_vars['animation_mode'], outputs=l_vars['humans_masking_accord'])
57
+ l_vars['optical_flow_redo_generation'].change(fn=hide_if_none, inputs=l_vars['optical_flow_redo_generation'], outputs=l_vars['redo_flow_factor_schedule_column'])
58
+ l_vars['optical_flow_cadence'].change(fn=hide_if_none, inputs=l_vars['optical_flow_cadence'], outputs=l_vars['cadence_flow_factor_schedule_column'])
59
+ l_vars['seed_behavior'].change(fn=change_seed_iter_visibility, inputs=l_vars['seed_behavior'], outputs=l_vars['seed_iter_N_row'])
60
+ l_vars['seed_behavior'].change(fn=change_seed_schedule_visibility, inputs=l_vars['seed_behavior'], outputs=l_vars['seed_schedule_row'])
61
+ l_vars['color_coherence'].change(fn=change_color_coherence_video_every_N_frames_visibility, inputs=l_vars['color_coherence'], outputs=l_vars['color_coherence_video_every_N_frames_row'])
62
+ l_vars['color_coherence'].change(fn=change_color_coherence_image_path_visibility, inputs=l_vars['color_coherence'], outputs=l_vars['color_coherence_image_path_row'])
63
+ l_vars['noise_type'].change(fn=change_perlin_visibility, inputs=l_vars['noise_type'], outputs=l_vars['perlin_row'])
64
+ l_vars['diffusion_cadence'].change(fn=hide_optical_flow_cadence, inputs=l_vars['diffusion_cadence'], outputs=l_vars['optical_flow_cadence_row'])
65
+ l_vars['depth_algorithm'].change(fn=legacy_3d_mode, inputs=l_vars['depth_algorithm'], outputs=l_vars['midas_weight'])
66
+ l_vars['depth_algorithm'].change(fn=show_leres_html_msg, inputs=l_vars['depth_algorithm'], outputs=l_vars['leres_license_msg'])
67
+ l_vars['fps'].change(fn=change_gif_button_visibility, inputs=l_vars['fps'], outputs=l_vars['make_gif'])
68
+ l_vars['r_upscale_model'].change(fn=update_r_upscale_factor, inputs=l_vars['r_upscale_model'], outputs=l_vars['r_upscale_factor'])
69
+ l_vars['ncnn_upscale_model'].change(fn=update_r_upscale_factor, inputs=l_vars['ncnn_upscale_model'], outputs=l_vars['ncnn_upscale_factor'])
70
+ l_vars['ncnn_upscale_model'].change(update_upscale_out_res_by_model_name, inputs=[l_vars['ncnn_upscale_in_vid_res'], l_vars['ncnn_upscale_model']],
71
+ outputs=l_vars['ncnn_upscale_out_vid_res'])
72
+ l_vars['ncnn_upscale_factor'].change(update_upscale_out_res, inputs=[l_vars['ncnn_upscale_in_vid_res'], l_vars['ncnn_upscale_factor']], outputs=l_vars['ncnn_upscale_out_vid_res'])
73
+ l_vars['vid_to_upscale_chosen_file'].change(vid_upscale_gradio_update_stats, inputs=[l_vars['vid_to_upscale_chosen_file'], l_vars['ncnn_upscale_factor']],
74
+ outputs=[l_vars['ncnn_upscale_in_vid_fps_ui_window'], l_vars['ncnn_upscale_in_vid_frame_count_window'], l_vars['ncnn_upscale_in_vid_res'],
75
+ l_vars['ncnn_upscale_out_vid_res']])
76
+ l_vars['hybrid_comp_mask_type'].change(fn=hide_if_none, inputs=l_vars['hybrid_comp_mask_type'], outputs=l_vars['hybrid_comp_mask_row'])
77
+ hybrid_motion_outputs = [l_vars['hybrid_flow_method'], l_vars['hybrid_flow_factor_schedule'], l_vars['hybrid_flow_consistency'], l_vars['hybrid_consistency_blur'],
78
+ l_vars['hybrid_motion_use_prev_img']]
79
+ for output in hybrid_motion_outputs:
80
+ l_vars['hybrid_motion'].change(fn=disable_by_non_optical_flow, inputs=l_vars['hybrid_motion'], outputs=output)
81
+ l_vars['hybrid_flow_consistency'].change(fn=hide_if_false, inputs=l_vars['hybrid_flow_consistency'], outputs=l_vars['hybrid_consistency_blur'])
82
+ l_vars['hybrid_composite'].change(fn=disable_by_hybrid_composite_dynamic, inputs=[l_vars['hybrid_composite'], l_vars['hybrid_comp_mask_type']], outputs=l_vars['hybrid_comp_mask_row'])
83
+ hybrid_composite_outputs = [l_vars['humans_masking_accord'], l_vars['hybrid_sch_accord'], l_vars['hybrid_comp_mask_type'], l_vars['hybrid_use_first_frame_as_init_image'],
84
+ l_vars['hybrid_use_init_image']]
85
+ for output in hybrid_composite_outputs:
86
+ l_vars['hybrid_composite'].change(fn=hide_if_false, inputs=l_vars['hybrid_composite'], outputs=output)
87
+ hybrid_comp_mask_type_outputs = [l_vars['hybrid_comp_mask_blend_alpha_schedule_row'], l_vars['hybrid_comp_mask_contrast_schedule_row'],
88
+ l_vars['hybrid_comp_mask_auto_contrast_cutoff_high_schedule_row'],
89
+ l_vars['hybrid_comp_mask_auto_contrast_cutoff_low_schedule_row']]
90
+ for output in hybrid_comp_mask_type_outputs:
91
+ l_vars['hybrid_comp_mask_type'].change(fn=hide_if_none, inputs=l_vars['hybrid_comp_mask_type'], outputs=output)
92
+ # End of hybrid related
93
+ skip_video_creation_outputs = [l_vars['fps_out_format_row'], l_vars['soundtrack_row'], l_vars['store_frames_in_ram'], l_vars['make_gif'], l_vars['r_upscale_row'],
94
+ l_vars['delete_imgs'], l_vars['delete_input_frames']]
95
+ for output in skip_video_creation_outputs:
96
+ l_vars['skip_video_creation'].change(fn=change_visibility_from_skip_video, inputs=l_vars['skip_video_creation'], outputs=output)
97
+ l_vars['frame_interpolation_slow_mo_enabled'].change(fn=hide_if_false, inputs=l_vars['frame_interpolation_slow_mo_enabled'], outputs=l_vars['frame_interp_slow_mo_amount_column'])
98
+ l_vars['frame_interpolation_engine'].change(fn=change_interp_x_max_limit, inputs=[l_vars['frame_interpolation_engine'], l_vars['frame_interpolation_x_amount']],
99
+ outputs=l_vars['frame_interpolation_x_amount'])
100
+ # Populate the FPS and FCount values as soon as a video is uploaded to the FileUploadBox (vid_to_interpolate_chosen_file)
101
+ l_vars['vid_to_interpolate_chosen_file'].change(gradio_f_interp_get_fps_and_fcount,
102
+ inputs=[l_vars['vid_to_interpolate_chosen_file'], l_vars['frame_interpolation_x_amount'], l_vars['frame_interpolation_slow_mo_enabled'],
103
+ l_vars['frame_interpolation_slow_mo_amount']],
104
+ outputs=[l_vars['in_vid_fps_ui_window'], l_vars['in_vid_frame_count_window'], l_vars['out_interp_vid_estimated_fps']])
105
+ l_vars['vid_to_interpolate_chosen_file'].change(fn=hide_interp_stats, inputs=[l_vars['vid_to_interpolate_chosen_file']], outputs=[l_vars['interp_live_stats_row']])
106
+ interp_hide_list = [l_vars['frame_interpolation_slow_mo_enabled'], l_vars['frame_interpolation_keep_imgs'], l_vars['frame_interpolation_use_upscaled'], l_vars['frame_interp_amounts_row'], l_vars['interp_existing_video_row']]
107
+ for output in interp_hide_list:
108
+ l_vars['frame_interpolation_engine'].change(fn=hide_interp_by_interp_status, inputs=l_vars['frame_interpolation_engine'], outputs=output)
109
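# Every wiring above follows the same Gradio idiom: component.change(fn, inputs,
# outputs) with fn returning gr.update(visible=...). A minimal standalone sketch
# (hypothetical components, not part of the Deforum UI):

with gr.Blocks() as demo:
    toggle = gr.Checkbox(label="Show advanced")
    advanced = gr.Textbox(label="Advanced option", visible=False)
    toggle.change(fn=lambda on: gr.update(visible=on), inputs=toggle, outputs=advanced)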
+
110
+ # START gradio-to-frame-interpolation / upscaling functions
111
+ def upload_vid_to_interpolate(file, engine, x_am, sl_enabled, sl_am, keep_imgs, in_vid_fps):
112
+ # print msg and do nothing if vid not uploaded or interp_x not provided
113
+ if not file or engine == 'None':
114
+ return print("Please upload a video and set a proper value for 'Interp X'. Can't interpolate x0 times :)")
115
+ f_location, f_crf, f_preset = get_ffmpeg_params()
116
+
117
+ process_interp_vid_upload_logic(file, engine, x_am, sl_enabled, sl_am, keep_imgs, f_location, f_crf, f_preset, in_vid_fps, f_models_path, file.orig_name)
118
+
119
+ def upload_pics_to_interpolate(pic_list, engine, x_am, sl_enabled, sl_am, keep_imgs, fps, add_audio, audio_track):
120
+ from PIL import Image
121
+
122
+ if pic_list is None or len(pic_list) < 2:
123
+ return print("Please upload at least 2 pics for interpolation.")
124
+ f_location, f_crf, f_preset = get_ffmpeg_params()
125
+ # make sure all uploaded pics have the same resolution
126
+ pic_sizes = [Image.open(picture_path.name).size for picture_path in pic_list]
127
+ if len(set(pic_sizes)) != 1:
128
+ return print("All uploaded pics need to be of the same Width and Height / resolution.")
129
+
130
+ resolution = pic_sizes[0]
131
+
132
+ process_interp_pics_upload_logic(pic_list, engine, x_am, sl_enabled, sl_am, keep_imgs, f_location, f_crf, f_preset, fps, f_models_path, resolution, add_audio, audio_track)
133
+
134
+ def ncnn_upload_vid_to_upscale(vid_path, in_vid_fps, in_vid_res, out_vid_res, upscale_model, upscale_factor, keep_imgs):
135
+ if vid_path is None:
136
+ print("Please upload a video :)")
137
+ return
138
+ f_location, f_crf, f_preset = get_ffmpeg_params()
139
+ current_user = get_os()
140
+ process_ncnn_upscale_vid_upload_logic(vid_path, in_vid_fps, in_vid_res, out_vid_res, f_models_path, upscale_model, upscale_factor, keep_imgs, f_location, f_crf, f_preset, current_user)
141
+
142
+ def upload_vid_to_depth(vid_to_depth_chosen_file, mode, thresholding, threshold_value, threshold_value_max, adapt_block_size, adapt_c, invert, end_blur, midas_weight_vid2depth, depth_keep_imgs):
143
+ # print msg and do nothing if vid not uploaded
144
+ if not vid_to_depth_chosen_file:
145
+ return print("Please upload a video :()")
146
+ f_location, f_crf, f_preset = get_ffmpeg_params()
147
+
148
+ process_depth_vid_upload_logic(vid_to_depth_chosen_file, mode, thresholding, threshold_value, threshold_value_max, adapt_block_size, adapt_c, invert, end_blur, midas_weight_vid2depth,
149
+ vid_to_depth_chosen_file.orig_name, depth_keep_imgs, f_location, f_crf, f_preset, f_models_path)
150
+
151
+ # END gradio-to-frame-interpolation / upscaling functions
152
+
153
+ def change_visibility_from_skip_video(choice):
154
+ return gr.update(visible=False) if choice else gr.update(visible=True)
155
+
156
+ def update_r_upscale_factor(choice):
157
+ return gr.update(value='x4', choices=['x4']) if choice != 'realesr-animevideov3' else gr.update(value='x2', choices=['x2', 'x3', 'x4'])
158
+
159
+ def change_perlin_visibility(choice):
160
+ return gr.update(visible=choice == "perlin")
161
+
162
+ def legacy_3d_mode(choice):
163
+ return gr.update(visible=choice.lower() in ["midas+adabins (old)", 'zoe+adabins (old)'])
164
+
165
+ def change_color_coherence_image_path_visibility(choice):
166
+ return gr.update(visible=choice == "Image")
167
+
168
+ def change_color_coherence_video_every_N_frames_visibility(choice):
169
+ return gr.update(visible=choice == "Video Input")
170
+
171
+ def change_seed_iter_visibility(choice):
172
+ return gr.update(visible=choice == "iter")
173
+
174
+ def change_seed_schedule_visibility(choice):
175
+ return gr.update(visible=choice == "schedule")
176
+
177
+ def disable_pers_flip_accord(choice):
178
+ return gr.update(visible=True) if choice in ['2D', '3D'] else gr.update(visible=False)
179
+
180
+ def per_flip_handle(anim_mode, per_f_enabled):
181
+ if anim_mode in ['2D', '3D'] and per_f_enabled:
182
+ return gr.update(visible=True)
183
+ return gr.update(visible=False)
184
+
185
+ def change_max_frames_visibility(choice):
186
+ return gr.update(visible=choice != "Video Input")
187
+
188
+ def change_diffusion_cadence_visibility(choice):
189
+ return gr.update(visible=choice not in ['Video Input', 'Interpolation'])
190
+
191
+ def disble_3d_related_stuff(choice):
192
+ return gr.update(visible=False) if choice != '3D' else gr.update(visible=True)
193
+
194
+ def only_show_in_non_3d_mode(choice):
195
+ return gr.update(visible=False) if choice == '3D' else gr.update(visible=True)
196
+
197
+ def enable_2d_related_stuff(choice):
198
+ return gr.update(visible=True) if choice == '2D' else gr.update(visible=False)
199
+
200
+ def disable_by_interpolation(choice):
201
+ return gr.update(visible=False) if choice in ['Interpolation'] else gr.update(visible=True)
202
+
203
+ def disable_by_video_input(choice):
204
+ return gr.update(visible=False) if choice in ['Video Input'] else gr.update(visible=True)
205
+
206
+ def hide_if_none(choice):
207
+ return gr.update(visible=choice != "None")
208
+
209
+ def change_gif_button_visibility(choice):
210
+ if choice is None or choice == "":
211
+ return gr.update(visible=True)
212
+ return gr.update(visible=False, value=False) if int(choice) > 30 else gr.update(visible=True)
213
+
214
+ def hide_if_false(choice):
215
+ return gr.update(visible=True) if choice else gr.update(visible=False)
216
+
217
+ def hide_if_true(choice):
218
+ return gr.update(visible=False) if choice else gr.update(visible=True)
219
+
220
+ def disable_by_hybrid_composite_dynamic(choice, comp_mask_type):
221
+ if choice in ['Normal', 'Before Motion', 'After Generation']:
222
+ if comp_mask_type != 'None':
223
+ return gr.update(visible=True)
224
+ return gr.update(visible=False)
225
+
226
+ def disable_by_non_optical_flow(choice):
227
+ return gr.update(visible=False) if choice != 'Optical Flow' else gr.update(visible=True)
228
+
229
+ # Upscaling Gradio UI related funcs
230
+ def vid_upscale_gradio_update_stats(vid_path, upscale_factor):
231
+ if not vid_path:
232
+ return '---', '---', '---', '---'
233
+ factor = extract_number(upscale_factor)
234
+ fps, fcount, resolution = get_quick_vid_info(vid_path.name)
235
+ in_res_str = f"{resolution[0]}*{resolution[1]}"
236
+ out_res_str = f"{resolution[0] * factor}*{resolution[1] * factor}"
237
+ return fps, fcount, in_res_str, out_res_str
238
+
239
+ def update_upscale_out_res(in_res, upscale_factor):
240
+ if not in_res:
241
+ return '---'
242
+ factor = extract_number(upscale_factor)
243
+ w, h = [int(x) * factor for x in in_res.split('*')]
244
+ return f"{w}*{h}"
245
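# e.g. update_upscale_out_res("640*360", "x4") -> "2560*1440", assuming
# extract_number("x4") yields 4.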
+
246
+ def update_upscale_out_res_by_model_name(in_res, upscale_model_name):
247
+ if not upscale_model_name or in_res == '---':
248
+ return '---'
249
+ factor = 2 if upscale_model_name == 'realesr-animevideov3' else 4
250
+ return f"{int(in_res.split('*')[0]) * factor}*{int(in_res.split('*')[1]) * factor}"
251
+
252
+ def hide_optical_flow_cadence(cadence_value):
253
+ return gr.update(visible=True) if cadence_value > 1 else gr.update(visible=False)
254
+
255
+ def hide_interp_by_interp_status(choice):
256
+ return gr.update(visible=False) if choice == 'None' else gr.update(visible=True)
257
+
258
+ def change_interp_x_max_limit(engine_name, current_value):
259
+ if engine_name == 'FILM':
260
+ return gr.update(maximum=300)
261
+ elif current_value > 10:
262
+ return gr.update(maximum=10, value=2)
263
+ return gr.update(maximum=10)
264
+
265
+ def hide_interp_stats(choice):
266
+ return gr.update(visible=True) if choice is not None else gr.update(visible=False)
267
+
268
+ def show_hybrid_html_msg(choice):
269
+ return gr.update(visible=True) if choice not in ['2D', '3D'] else gr.update(visible=False)
270
+
271
+ def change_hybrid_tab_status(choice):
272
+ return gr.update(visible=True) if choice in ['2D', '3D'] else gr.update(visible=False)
273
+
274
+ def show_leres_html_msg(choice):
275
+ return gr.update(visible=True) if choice.lower() == 'leres' else gr.update(visible=False)
276
+
277
+ def show_when_ddim(sampler_name):
278
+ return gr.update(visible=True) if sampler_name.lower() == 'ddim' else gr.update(visible=False)
279
+
280
+ def show_when_ancestral_samplers(sampler_name):
281
+ return gr.update(visible=True) if sampler_name.lower() in ['euler a', 'dpm++ 2s a', 'dpm2 a', 'dpm2 a karras', 'dpm++ 2s a karras'] else gr.update(visible=False)
282
+
283
+ def change_css(checkbox_status):
284
+ if checkbox_status:
285
+ display = "block"
286
+ else:
287
+ display = "none"
288
+
289
+ html_template = f'''
290
+ <style>
291
+ #tab_deforum_interface .svelte-e8n7p6, #f_interp_accord {{
292
+ display: {display} !important;
293
+ }}
294
+ </style>
295
+ '''
296
+ return html_template
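# Editorial note: change_css returns a raw <style> block; it is presumably wired
# to a gr.HTML output elsewhere in the UI so that toggling the checkbox shows or
# hides the Deforum interpolation accordion via CSS display rules.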
extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/human_masking.py ADDED
@@ -0,0 +1,86 @@
1
+ # Copyright (C) 2023 Deforum LLC
2
+ #
3
+ # This program is free software: you can redistribute it and/or modify
4
+ # it under the terms of the GNU Affero General Public License as published by
5
+ # the Free Software Foundation, version 3 of the License.
6
+ #
7
+ # This program is distributed in the hope that it will be useful,
8
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
9
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10
+ # GNU General Public License for more details.
11
+ #
12
+ # You should have received a copy of the GNU Affero General Public License
13
+ # along with this program. If not, see <https://www.gnu.org/licenses/>.
14
+
15
+ # Contact the authors: https://deforum.github.io/
16
+
17
+ import os, cv2
18
+ import torch
19
+ from pathlib import Path
20
+ from multiprocessing import freeze_support
21
+
22
+ def extract_frames(input_video_path, output_imgs_path):
23
+ # Open the video file
24
+ vidcap = cv2.VideoCapture(input_video_path)
25
+
26
+ # Get the total number of frames in the video
27
+ frame_count = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
28
+
29
+ # Create the output directory if it does not exist
30
+ os.makedirs(output_imgs_path, exist_ok=True)
31
+
32
+ # Extract the frames
33
+ for i in range(frame_count):
34
+ success, image = vidcap.read()
35
+ if success:
36
+ cv2.imwrite(os.path.join(output_imgs_path, f"frame{i}.png"), image)
+ vidcap.release() # free the video file handle once all frames are read
+ print(f"{frame_count} frames extracted and saved to {output_imgs_path}")
38
+
39
+ def video2humanmasks(input_frames_path, output_folder_path, output_type, fps):
40
+ # freeze support is needed for video outputting
41
+ freeze_support()
42
+
43
+ # check if input path exists and is a directory
44
+ if not os.path.exists(input_frames_path) or not os.path.isdir(input_frames_path):
45
+ raise ValueError("Invalid input path: {}".format(input_frames_path))
46
+
47
+ # check if output path exists and is a directory
48
+ if not os.path.exists(output_folder_path) or not os.path.isdir(output_folder_path):
49
+ raise ValueError("Invalid output path: {}".format(output_folder_path))
50
+
51
+ # check if output_type is valid
52
+ valid_output_types = ["video", "pngs", "both"]
53
+ if output_type.lower() not in valid_output_types:
54
+ raise ValueError("Invalid output type: {}. Must be one of {}".format(output_type, valid_output_types))
55
+
56
+ # try to predict where torch cache lives, so we can try and fetch models from cache in the next step
57
+ predicted_torch_model_cache_path = os.path.join(Path.home(), ".cache", "torch", "hub", "hithereai_RobustVideoMatting_master")
58
+ predicted_rvm_cache_testfile = os.path.join(predicted_torch_model_cache_path, "hubconf.py")
59
+
60
+ # try to fetch the models from cache; only if they can't be found, download from the internet (enables offline usage)
61
+ try:
62
+ # Try to fetch the models from cache
63
+ convert_video = torch.hub.load(predicted_torch_model_cache_path, "converter", source='local')
64
+ model = torch.hub.load(predicted_torch_model_cache_path, "resnet50", source='local').cuda()
65
+ except Exception:
66
+ # Download from the internet if not found in cache
67
+ convert_video = torch.hub.load("hithereai/RobustVideoMatting", "converter")
68
+ model = torch.hub.load("hithereai/RobustVideoMatting", "resnet50").cuda()
69
+
70
+ output_alpha_vid_path = os.path.join(output_folder_path, "human_masked_video.mp4")
71
+ # extract human masks from the input folder's imgs.
+ # in this step PNGs are extracted only if output_type is set to PNGs; otherwise a video is made, and in the case of Both, the video is extracted to PNGs in the next step
73
+ convert_video(
74
+ model,
75
+ input_source=input_frames_path, # full path of the folder that contains all of the extracted input imgs
76
+ output_type='video' if output_type.upper() in ("VIDEO", "BOTH") else 'png_sequence',
77
+ output_alpha=output_alpha_vid_path if output_type.upper() in ("VIDEO", "BOTH") else output_folder_path,
78
+ output_video_mbps=4,
79
+ output_video_fps=fps,
80
+ downsample_ratio=None, # None for auto
81
+ seq_chunk=12, # Process n frames at once for better parallelism
82
+ progress=True # show extraction progress
83
+ )
84
+
85
+ if output_type.lower() == "both":
86
+ extract_frames(output_alpha_vid_path, output_folder_path)
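# Hypothetical call (paths are placeholders): with output_type="both" this
# writes human_masked_video.mp4 into the output folder and then explodes it
# into per-frame PNG alpha masks:
#
#     video2humanmasks("out/inputframes", "out/human_masks", "both", fps=24)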
extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/hybrid_video.py ADDED
@@ -0,0 +1,613 @@
1
+ # Copyright (C) 2023 Deforum LLC
2
+ #
3
+ # This program is free software: you can redistribute it and/or modify
4
+ # it under the terms of the GNU Affero General Public License as published by
5
+ # the Free Software Foundation, version 3 of the License.
6
+ #
7
+ # This program is distributed in the hope that it will be useful,
8
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
9
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10
+ # GNU General Public License for more details.
11
+ #
12
+ # You should have received a copy of the GNU Affero General Public License
13
+ # along with this program. If not, see <https://www.gnu.org/licenses/>.
14
+
15
+ # Contact the authors: https://deforum.github.io/
16
+
17
+ import os
18
+ import pathlib
19
+ import random
20
+ import cv2
21
+ import numpy as np
22
+ import PIL
23
+ from PIL import Image, ImageChops, ImageOps, ImageEnhance
24
+ from scipy.ndimage.filters import gaussian_filter
25
+ from .consistency_check import make_consistency
26
+ from .human_masking import video2humanmasks
27
+ from .load_images import load_image
28
+ from .video_audio_utilities import vid2frames, get_quick_vid_info, get_frame_name
29
+
30
+ def delete_all_imgs_in_folder(folder_path):
31
+ files = list(pathlib.Path(folder_path).glob('*.jpg'))
32
+ files.extend(list(pathlib.Path(folder_path).glob('*.png')))
33
+ for f in files: os.remove(f)
34
+
35
+ def hybrid_generation(args, anim_args, root):
36
+ video_in_frame_path = os.path.join(args.outdir, 'inputframes')
37
+ hybrid_frame_path = os.path.join(args.outdir, 'hybridframes')
38
+ human_masks_path = os.path.join(args.outdir, 'human_masks')
39
+
40
+ # create hybridframes folder whether using init_image or inputframes
41
+ os.makedirs(hybrid_frame_path, exist_ok=True)
42
+
43
+ if anim_args.hybrid_generate_inputframes:
44
+ # create folders for the video input frames and optional hybrid frames to live in
45
+ os.makedirs(video_in_frame_path, exist_ok=True)
46
+
47
+ # delete frames if overwrite = true
48
+ if anim_args.overwrite_extracted_frames:
49
+ delete_all_imgs_in_folder(hybrid_frame_path)
50
+
51
+ # save the video frames from input video
52
+ print(f"Video to extract: {anim_args.video_init_path}")
53
+ print(f"Extracting video (1 every {anim_args.extract_nth_frame}) frames to {video_in_frame_path}...")
54
+ video_fps = vid2frames(video_path=anim_args.video_init_path, video_in_frame_path=video_in_frame_path, n=anim_args.extract_nth_frame, overwrite=anim_args.overwrite_extracted_frames, extract_from_frame=anim_args.extract_from_frame, extract_to_frame=anim_args.extract_to_frame)
55
+
56
+ # extract alpha masks of humans from the extracted input video imgs
57
+ if anim_args.hybrid_generate_human_masks != "None":
58
+ # create a folder for the human masks imgs to live in
59
+ print(f"Checking /creating a folder for the human masks")
60
+ os.makedirs(human_masks_path, exist_ok=True)
61
+
62
+ # delete frames if overwrite = true
63
+ if anim_args.overwrite_extracted_frames:
64
+ delete_all_imgs_in_folder(human_masks_path)
65
+
66
+ # if hybrid_generate_inputframes isn't selected, vid2frames is never called and we don't get the video fps, so check the video fps here instead
67
+ if not anim_args.hybrid_generate_inputframes:
68
+ video_fps, _, _ = get_quick_vid_info(anim_args.video_init_path) # get_quick_vid_info returns (fps, frame_count, resolution); we need the fps
69
+
70
+ # calculate the correct fps of the masked video according to the original video fps and 'extract_nth_frame'
71
+ output_fps = video_fps/anim_args.extract_nth_frame
72
+
73
+ # generate the actual alpha masks from the input imgs
74
+ print(f"Extracting alpha humans masks from the input frames")
75
+ video2humanmasks(video_in_frame_path, human_masks_path, anim_args.hybrid_generate_human_masks, output_fps)
76
+
77
+ # get sorted list of inputfiles
78
+ inputfiles = sorted(pathlib.Path(video_in_frame_path).glob('*.jpg'))
79
+
80
+ if not anim_args.hybrid_use_init_image:
81
+ # determine max frames from length of input frames
82
+ anim_args.max_frames = len(inputfiles)
83
+ if anim_args.max_frames < 1:
84
+ raise Exception(f"Error: No input frames found in {video_in_frame_path}! Please check your input video path and whether you've opted to extract input frames.")
85
+ print(f"Using {anim_args.max_frames} input frames from {video_in_frame_path}...")
86
+
87
+ # use first frame as init
88
+ if anim_args.hybrid_use_first_frame_as_init_image:
89
+ for f in inputfiles:
90
+ args.init_image = str(f)
91
+ args.init_image_box = None # init_image_box not used in this case
92
+ args.use_init = True
93
+ print(f"Using init_image from video: {args.init_image}")
94
+ break
95
+
96
+ return args, anim_args, inputfiles
97
+
98
+ def hybrid_composite(args, anim_args, frame_idx, prev_img, depth_model, hybrid_comp_schedules, root):
99
+ video_frame = os.path.join(args.outdir, 'inputframes', get_frame_name(anim_args.video_init_path) + f"{frame_idx:09}.jpg")
100
+ video_depth_frame = os.path.join(args.outdir, 'hybridframes', get_frame_name(anim_args.video_init_path) + f"_vid_depth{frame_idx:09}.jpg")
101
+ depth_frame = os.path.join(args.outdir, f"{root.timestring}_depth_{frame_idx-1:09}.png")
102
+ mask_frame = os.path.join(args.outdir, 'hybridframes', get_frame_name(anim_args.video_init_path) + f"_mask{frame_idx:09}.jpg")
103
+ comp_frame = os.path.join(args.outdir, 'hybridframes', get_frame_name(anim_args.video_init_path) + f"_comp{frame_idx:09}.jpg")
104
+ prev_frame = os.path.join(args.outdir, 'hybridframes', get_frame_name(anim_args.video_init_path) + f"_prev{frame_idx:09}.jpg")
105
+ prev_img = cv2.cvtColor(prev_img, cv2.COLOR_BGR2RGB)
106
+ prev_img_hybrid = Image.fromarray(prev_img)
107
+ if anim_args.hybrid_use_init_image:
108
+ video_image = load_image(args.init_image, args.init_image_box)
109
+ else:
110
+ video_image = Image.open(video_frame)
111
+ video_image = video_image.resize((args.W, args.H), PIL.Image.LANCZOS)
112
+ hybrid_mask = None
113
+
114
+ # composite mask types
115
+ if anim_args.hybrid_comp_mask_type == 'Depth': # get depth from last generation
116
+ hybrid_mask = Image.open(depth_frame)
117
+ elif anim_args.hybrid_comp_mask_type == 'Video Depth': # get video depth
118
+ video_depth = depth_model.predict(np.array(video_image), anim_args.midas_weight, root.half_precision)
119
+ depth_model.save(video_depth_frame, video_depth)
120
+ hybrid_mask = Image.open(video_depth_frame)
121
+ elif anim_args.hybrid_comp_mask_type == 'Blend': # create blend mask image
122
+ hybrid_mask = Image.blend(ImageOps.grayscale(prev_img_hybrid), ImageOps.grayscale(video_image), hybrid_comp_schedules['mask_blend_alpha'])
123
+ elif anim_args.hybrid_comp_mask_type == 'Difference': # create difference mask image
124
+ hybrid_mask = ImageChops.difference(ImageOps.grayscale(prev_img_hybrid), ImageOps.grayscale(video_image))
125
+
126
+ # optionally invert mask, if mask type is defined
127
+ if anim_args.hybrid_comp_mask_inverse and anim_args.hybrid_comp_mask_type != "None":
128
+ hybrid_mask = ImageOps.invert(hybrid_mask)
129
+
130
+ # if a mask type is selected, make composition
131
+ if hybrid_mask is None:
132
+ hybrid_comp = video_image
133
+ else:
134
+ # ensure grayscale
135
+ hybrid_mask = ImageOps.grayscale(hybrid_mask)
136
+ # equalization before
137
+ if anim_args.hybrid_comp_mask_equalize in ['Before', 'Both']:
138
+ hybrid_mask = ImageOps.equalize(hybrid_mask)
139
+ # contrast
140
+ hybrid_mask = ImageEnhance.Contrast(hybrid_mask).enhance(hybrid_comp_schedules['mask_contrast'])
141
+ # auto contrast with cutoffs lo/hi
142
+ if anim_args.hybrid_comp_mask_auto_contrast:
143
+ hybrid_mask = autocontrast_grayscale(np.array(hybrid_mask), hybrid_comp_schedules['mask_auto_contrast_cutoff_low'], hybrid_comp_schedules['mask_auto_contrast_cutoff_high'])
144
+ hybrid_mask = Image.fromarray(hybrid_mask)
145
+ hybrid_mask = ImageOps.grayscale(hybrid_mask)
146
+ if anim_args.hybrid_comp_save_extra_frames:
147
+ hybrid_mask.save(mask_frame)
148
+ # equalization after
149
+ if anim_args.hybrid_comp_mask_equalize in ['After', 'Both']:
150
+ hybrid_mask = ImageOps.equalize(hybrid_mask)
151
+ # do compositing and save
152
+ hybrid_comp = Image.composite(prev_img_hybrid, video_image, hybrid_mask)
153
+ if anim_args.hybrid_comp_save_extra_frames:
154
+ hybrid_comp.save(comp_frame)
155
+
156
+ # final blend of composite with prev_img, or just a blend if no composite is selected
157
+ hybrid_blend = Image.blend(prev_img_hybrid, hybrid_comp, hybrid_comp_schedules['alpha'])
158
+ if anim_args.hybrid_comp_save_extra_frames:
159
+ hybrid_blend.save(prev_frame)
160
+
161
+ prev_img = cv2.cvtColor(np.array(hybrid_blend), cv2.COLOR_RGB2BGR)
162
+
163
+ # restore to np array and return
164
+ return args, prev_img
165
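# Compositing order, as implemented above: build mask (Depth / Video Depth /
# Blend / Difference) -> optional invert -> equalize ('Before') -> contrast ->
# optional auto-contrast -> equalize ('After') ->
# Image.composite(prev, video, mask) -> Image.blend(prev, comp, alpha) -> BGR.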
+
166
+ def get_matrix_for_hybrid_motion(frame_idx, dimensions, inputfiles, hybrid_motion):
167
+ print(f"Calculating {hybrid_motion} RANSAC matrix for frames {frame_idx} to {frame_idx+1}")
168
+ img1 = cv2.cvtColor(get_resized_image_from_filename(str(inputfiles[frame_idx]), dimensions), cv2.COLOR_BGR2GRAY)
169
+ img2 = cv2.cvtColor(get_resized_image_from_filename(str(inputfiles[frame_idx+1]), dimensions), cv2.COLOR_BGR2GRAY)
170
+ M = get_transformation_matrix_from_images(img1, img2, hybrid_motion)
171
+ return M
172
+
173
+ def get_matrix_for_hybrid_motion_prev(frame_idx, dimensions, inputfiles, prev_img, hybrid_motion):
174
+ print(f"Calculating {hybrid_motion} RANSAC matrix for frames {frame_idx} to {frame_idx+1}")
175
+ # first handle invalid images by returning default matrix
176
+ height, width = prev_img.shape[:2]
177
+ if height == 0 or width == 0 or prev_img.dtype != np.uint8:
178
+ return get_hybrid_motion_default_matrix(hybrid_motion)
179
+ else:
180
+ prev_img_gray = cv2.cvtColor(prev_img, cv2.COLOR_BGR2GRAY)
181
+ img = cv2.cvtColor(get_resized_image_from_filename(str(inputfiles[frame_idx+1]), dimensions), cv2.COLOR_BGR2GRAY)
182
+ M = get_transformation_matrix_from_images(prev_img_gray, img, hybrid_motion)
183
+ return M
184
+
185
+ def get_flow_for_hybrid_motion(frame_idx, dimensions, inputfiles, hybrid_frame_path, prev_flow, method, raft_model, consistency_check=True, consistency_blur=0, do_flow_visualization=False):
186
+ print(f"Calculating {method} optical flow {'w/consistency mask' if consistency_check else ''} for frames {frame_idx} to {frame_idx+1}")
187
+ i1 = get_resized_image_from_filename(str(inputfiles[frame_idx]), dimensions)
188
+ i2 = get_resized_image_from_filename(str(inputfiles[frame_idx+1]), dimensions)
189
+ if consistency_check:
190
+ flow, reliable_flow = get_reliable_flow_from_images(i1, i2, method, raft_model, prev_flow, consistency_blur) # forward flow w/backward consistency check
191
+ if do_flow_visualization: save_flow_mask_visualization(frame_idx, reliable_flow, hybrid_frame_path)
192
+ else:
193
+ flow = get_flow_from_images(i1, i2, method, raft_model, prev_flow) # old single flow forward
194
+ if do_flow_visualization: save_flow_visualization(frame_idx, dimensions, flow, inputfiles, hybrid_frame_path)
195
+ return flow
196
+
197
+ def get_flow_for_hybrid_motion_prev(frame_idx, dimensions, inputfiles, hybrid_frame_path, prev_flow, prev_img, method, raft_model, consistency_check=True, consistency_blur=0, do_flow_visualization=False):
198
+ print(f"Calculating {method} optical flow {'w/consistency mask' if consistency_check else ''} for frames {frame_idx} to {frame_idx+1}")
199
+ reliable_flow = None
200
+ # first handle invalid images by returning default flow
201
+ height, width = prev_img.shape[:2]
202
+ if height == 0 or width == 0:
203
+ flow = get_hybrid_motion_default_flow(dimensions)
204
+ else:
205
+ i1 = prev_img.astype(np.uint8)
206
+ i2 = get_resized_image_from_filename(str(inputfiles[frame_idx+1]), dimensions)
207
+ if consistency_check:
208
+ flow, reliable_flow = get_reliable_flow_from_images(i1, i2, method, raft_model, prev_flow, consistency_blur) # forward flow w/backward consistency check
209
+ if do_flow_visualization: save_flow_mask_visualization(frame_idx, reliable_flow, hybrid_frame_path)
210
+ else:
211
+ flow = get_flow_from_images(i1, i2, method, raft_model, prev_flow)
212
+ if do_flow_visualization: save_flow_visualization(frame_idx, dimensions, flow, inputfiles, hybrid_frame_path)
213
+ return flow
214
+
215
+ def get_reliable_flow_from_images(i1, i2, method, raft_model, prev_flow, consistency_blur, reliability=0):
216
+ flow_forward = get_flow_from_images(i1, i2, method, raft_model, prev_flow)
217
+ flow_backward = get_flow_from_images(i2, i1, method, raft_model, None)
218
+ reliable_flow = make_consistency(flow_forward, flow_backward, edges_unreliable=False)
219
+ if consistency_blur > 0:
220
+ reliable_flow = custom_gaussian_blur(reliable_flow.astype(np.float32), 1, consistency_blur)
221
+ return filter_flow(flow_forward, reliable_flow, reliability, consistency_blur), reliable_flow
222
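# Editorial note: the consistency check computes both forward (i1 -> i2) and
# backward (i2 -> i1) flow; make_consistency() down-weights pixels whose round
# trip does not land back near their origin (occlusions, bad matches) before
# the flow is applied.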
+
223
+ def custom_gaussian_blur(input_array, blur_size, sigma):
224
+ return gaussian_filter(input_array, sigma=(sigma, sigma, 0), order=0, mode='constant', cval=0.0, truncate=blur_size)
225
+
226
+ def filter_flow(flow, reliable_flow, reliability=0.5, consistency_blur=0):
227
+ # reliability from reliable flow: -0.75 is bad, 0 is meh/outside, 1 is great
228
+ # Create a mask from the first channel of the reliable_flow array
229
+ mask = reliable_flow[..., 0]
230
+
231
+ # to set everything to 1 or 0 based on reliability
232
+ # mask = np.where(mask >= reliability, 1, 0)
233
+
234
+ # Expand the mask to match the shape of the forward_flow array
235
+ mask = np.repeat(mask[..., np.newaxis], flow.shape[2], axis=2)
236
+
237
+ # Apply the mask to the flow
238
+ return flow * mask
239
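# Note (editorial): as written, the reliability and consistency_blur parameters
# are unused in this function; the commented-out np.where line shows the
# intended hard threshold. Because the raw mask can be negative (about -0.75
# for unreliable pixels), those pixels currently get scaled or reversed flow
# rather than zero flow.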
+
240
+ def image_transform_ransac(image_cv2, M, hybrid_motion, depth=None):
+     if hybrid_motion == "Perspective":
+         return image_transform_perspective(image_cv2, M, depth)
+     else:  # Affine
+         return image_transform_affine(image_cv2, M, depth)
+
+ def image_transform_optical_flow(img, flow, flow_factor):
+     # scale the flow unless the factor is exactly 1
+     if flow_factor != 1:
+         flow = flow * flow_factor
+     # the warp pulls pixels against the motion, so the flow must be negated
+     flow = -flow
+     h, w = img.shape[:2]
+     # convert relative displacements into absolute sampling coordinates
+     flow[:, :, 0] += np.arange(w)
+     flow[:, :, 1] += np.arange(h)[:, np.newaxis]
+     return remap(img, flow)
+
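To make the coordinate bookkeeping concrete, a worked sketch with plain numpy for a 1x3 image and a uniform +1 px motion in x:

import numpy as np

flow = np.zeros((1, 3, 2), np.float32)
flow[:, :, 0] = 1.0            # every pixel moved +1 px in x
flow = -flow                   # pull-style remap samples against the motion
flow[:, :, 0] += np.arange(3)  # absolute x coordinates
flow[:, :, 1] += np.arange(1)[:, np.newaxis]
print(flow[0, :, 0])           # [-1.  0.  1.]: output pixel i samples from x = i - 1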
+ def image_transform_affine(image_cv2, M, depth=None):
+     if depth is None:
+         return cv2.warpAffine(image_cv2, M, (image_cv2.shape[1], image_cv2.shape[0]), borderMode=cv2.BORDER_REFLECT_101)
+     else:  # NEED TO IMPLEMENT THE FOLLOWING FUNCTION
+         return depth_based_affine_warp(image_cv2, depth, M)
+
+ def image_transform_perspective(image_cv2, M, depth=None):
+     if depth is None:
+         return cv2.warpPerspective(image_cv2, M, (image_cv2.shape[1], image_cv2.shape[0]), borderMode=cv2.BORDER_REFLECT_101)
+     else:  # NEED TO IMPLEMENT THE FOLLOWING FUNCTION
+         return render_3d_perspective(image_cv2, depth, M)
+
+ def get_hybrid_motion_default_matrix(hybrid_motion):
+     if hybrid_motion == "Perspective":
+         arr = np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])  # 3x3 identity homography
+     else:
+         arr = np.array([[1., 0., 0.], [0., 1., 0.]])  # 2x3 identity affine
+     return arr
+
+ def get_hybrid_motion_default_flow(dimensions):
+     cols, rows = dimensions
+     flow = np.zeros((rows, cols, 2), np.float32)
+     return flow
+
+ def get_transformation_matrix_from_images(img1, img2, hybrid_motion, confidence=0.75):
+     # Create SIFT detector and feature extractor
+     sift = cv2.SIFT_create()
+
+     # Detect keypoints and compute descriptors
+     kp1, des1 = sift.detectAndCompute(img1, None)
+     kp2, des2 = sift.detectAndCompute(img2, None)
+
+     # Create BFMatcher object and match descriptors
+     bf = cv2.BFMatcher()
+     matches = bf.knnMatch(des1, des2, k=2)
+
+     # Apply the ratio test to filter good matches
+     good_matches = []
+     for m, n in matches:
+         if m.distance < confidence * n.distance:
+             good_matches.append(m)
+
+     # fall back to the identity matrix if there are too few good matches
+     if len(good_matches) <= 8:
+         return get_hybrid_motion_default_matrix(hybrid_motion)
+
+     # Convert keypoints to numpy arrays
+     src_pts = np.float32([kp1[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
+     dst_pts = np.float32([kp2[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)
+
+     if len(src_pts) <= 8 or len(dst_pts) <= 8:
+         return get_hybrid_motion_default_matrix(hybrid_motion)
+     elif hybrid_motion == "Perspective":  # Perspective transformation (3x3)
+         transformation_matrix, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
+         return transformation_matrix
+     else:  # Affine - rigid transformation (no skew, 2x3)
+         transformation_rigid_matrix, rigid_mask = cv2.estimateAffinePartial2D(src_pts, dst_pts)
+         return transformation_rigid_matrix
+
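Taken together with the warp helpers above, a typical call sequence looks like this sketch (the frame file names are placeholders):

import cv2

frame_a = cv2.imread("frame_0001.png")  # placeholder paths
frame_b = cv2.imread("frame_0002.png")
M = get_transformation_matrix_from_images(frame_a, frame_b, "Perspective")
warped = image_transform_ransac(frame_a, M, "Perspective")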
+ def get_flow_from_images(i1, i2, method, raft_model, prev_flow=None):
+     if method == "RAFT":
+         if raft_model is None:
+             raise Exception("RAFT model not provided to get_flow_from_images function, cannot continue.")
+         return get_flow_from_images_RAFT(i1, i2, raft_model)
+     elif method == "DIS Medium":
+         return get_flow_from_images_DIS(i1, i2, 'medium', prev_flow)
+     elif method == "DIS Fine":
+         return get_flow_from_images_DIS(i1, i2, 'fine', prev_flow)
+     elif method == "DenseRLOF":  # Unused - requires opencv-contrib-python (full opencv) INSTEAD of opencv-python
+         return get_flow_from_images_Dense_RLOF(i1, i2, prev_flow)
+     elif method == "SF":  # Unused - requires opencv-contrib-python (full opencv) INSTEAD of opencv-python
+         return get_flow_from_images_SF(i1, i2, prev_flow)
+     elif method == "DualTVL1":  # Unused - requires opencv-contrib-python (full opencv) INSTEAD of opencv-python
+         return get_flow_from_images_DualTVL1(i1, i2, prev_flow)
+     elif method == "DeepFlow":  # Unused - requires opencv-contrib-python (full opencv) INSTEAD of opencv-python
+         return get_flow_from_images_DeepFlow(i1, i2, prev_flow)
+     elif method == "PCAFlow":  # Unused - requires opencv-contrib-python (full opencv) INSTEAD of opencv-python
+         return get_flow_from_images_PCAFlow(i1, i2, prev_flow)
+     elif method == "Farneback":  # Farneback, normal preset
+         return get_flow_from_images_Farneback(i1, i2, last_flow=prev_flow)
+     # if we reached this point, something went wrong; raise an error
+     raise RuntimeError(f"Invalid flow method name: '{method}'")
+
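A minimal sketch of calling the dispatcher with one of the presets that works on stock opencv-python (frames are assumed to be same-sized uint8 BGR arrays; the synthetic images here are placeholders):

import numpy as np

frame_a = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)
frame_b = np.roll(frame_a, 2, axis=1)  # shift right by 2 px
flow = get_flow_from_images(frame_a, frame_b, "DIS Medium", raft_model=None)
print(flow.shape)  # (64, 64, 2): per-pixel (dx, dy) displacements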
+ def get_flow_from_images_RAFT(i1, i2, raft_model):
+     flow = raft_model.predict(i1, i2)
+     return flow
+
+ def get_flow_from_images_DIS(i1, i2, preset, prev_flow):
+     # DIS PRESETS CHART KEY: finest scale, grad desc its, patch size
+     # DIS_MEDIUM: 1, 25, 8 | DIS_FAST: 2, 16, 8 | DIS_ULTRAFAST: 2, 12, 8
+     if preset == 'medium': preset_code = cv2.DISOPTICAL_FLOW_PRESET_MEDIUM
+     elif preset == 'fast': preset_code = cv2.DISOPTICAL_FLOW_PRESET_FAST
+     elif preset == 'ultrafast': preset_code = cv2.DISOPTICAL_FLOW_PRESET_ULTRAFAST
+     else: preset_code = None  # 'slow'/'fine' are configured manually below
+     i1 = cv2.cvtColor(i1, cv2.COLOR_BGR2GRAY)
+     i2 = cv2.cvtColor(i2, cv2.COLOR_BGR2GRAY)
+     dis = cv2.DISOpticalFlow_create(preset_code)
+     # custom presets
+     if preset == 'slow':
+         dis.setGradientDescentIterations(192)
+         dis.setFinestScale(1)
+         dis.setPatchSize(8)
+         dis.setPatchStride(4)
+     if preset == 'fine':
+         dis.setGradientDescentIterations(192)
+         dis.setFinestScale(0)
+         dis.setPatchSize(8)
+         dis.setPatchStride(4)
+     return dis.calc(i1, i2, prev_flow)
+
+ def get_flow_from_images_Dense_RLOF(i1, i2, last_flow=None):
+     return cv2.optflow.calcOpticalFlowDenseRLOF(i1, i2, flow=last_flow)
+
+ def get_flow_from_images_SF(i1, i2, last_flow=None, layers=3, averaging_block_size=2, max_flow=4):
+     return cv2.optflow.calcOpticalFlowSF(i1, i2, layers, averaging_block_size, max_flow)
+
+ def get_flow_from_images_DualTVL1(i1, i2, prev_flow):
+     i1 = cv2.cvtColor(i1, cv2.COLOR_BGR2GRAY)
+     i2 = cv2.cvtColor(i2, cv2.COLOR_BGR2GRAY)
+     f = cv2.optflow.DualTVL1OpticalFlow_create()
+     return f.calc(i1, i2, prev_flow)
+
+ def get_flow_from_images_DeepFlow(i1, i2, prev_flow):
+     i1 = cv2.cvtColor(i1, cv2.COLOR_BGR2GRAY)
+     i2 = cv2.cvtColor(i2, cv2.COLOR_BGR2GRAY)
+     f = cv2.optflow.createOptFlow_DeepFlow()
+     return f.calc(i1, i2, prev_flow)
+
+ def get_flow_from_images_PCAFlow(i1, i2, prev_flow):
+     i1 = cv2.cvtColor(i1, cv2.COLOR_BGR2GRAY)
+     i2 = cv2.cvtColor(i2, cv2.COLOR_BGR2GRAY)
+     f = cv2.optflow.createOptFlow_PCAFlow()
+     return f.calc(i1, i2, prev_flow)
+
+ def get_flow_from_images_Farneback(i1, i2, preset="normal", last_flow=None, pyr_scale=0.5, levels=3, winsize=15, iterations=3, poly_n=5, poly_sigma=1.2, flags=0):
+     pyr_scale = 0.5       # the image scale (<1) used to build the pyramid for each image
+     if preset == "fine":
+         levels = 13       # number of pyramid layers, including the initial image
+         winsize = 77      # averaging window size
+         iterations = 13   # number of iterations at each pyramid level
+         poly_n = 15       # size of the pixel neighborhood used for the polynomial expansion
+         poly_sigma = 0.8  # std dev of the Gaussian used to smooth derivatives for the polynomial expansion
+     else:  # "normal"
+         levels = 5
+         winsize = 21
+         iterations = 5
+         poly_n = 7
+         poly_sigma = 1.2
+     i1 = cv2.cvtColor(i1, cv2.COLOR_BGR2GRAY)
+     i2 = cv2.cvtColor(i2, cv2.COLOR_BGR2GRAY)
+     flags = 0  # set to cv2.OPTFLOW_USE_INITIAL_FLOW to seed the solver with last_flow
+     flow = cv2.calcOpticalFlowFarneback(i1, i2, last_flow, pyr_scale, levels, winsize, iterations, poly_n, poly_sigma, flags)
+     return flow
+
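With flags left at 0, last_flow is only used as an output buffer. If one wanted the previous frame's flow to actually seed the solver, the call would look like this sketch (gray_a, gray_b, and last_flow are placeholder variables; last_flow must be a valid (H, W, 2) float32 array):

import cv2

flow = cv2.calcOpticalFlowFarneback(
    gray_a, gray_b, last_flow,
    0.5, 5, 21, 5, 7, 1.2,
    cv2.OPTFLOW_USE_INITIAL_FLOW,  # reuse last_flow as the initial estimate
)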
+ def save_flow_visualization(frame_idx, dimensions, flow, inputfiles, hybrid_frame_path):
+     flow_img_file = os.path.join(hybrid_frame_path, f"flow{frame_idx:09}.jpg")
+     flow_img = cv2.imread(str(inputfiles[frame_idx]))  # BGR
+     flow_img = cv2.resize(flow_img, (dimensions[0], dimensions[1]), interpolation=cv2.INTER_AREA)
+     flow_img = cv2.cvtColor(flow_img, cv2.COLOR_BGR2GRAY)
+     flow_img = cv2.cvtColor(flow_img, cv2.COLOR_GRAY2BGR)
+     flow_img = draw_flow_lines_in_grid_in_color(flow_img, flow)
+     cv2.imwrite(flow_img_file, flow_img)  # cv2.imwrite expects BGR, so no channel swap is needed
+     print(f"Saved optical flow visualization: {flow_img_file}")
+
+ def save_flow_mask_visualization(frame_idx, reliable_flow, hybrid_frame_path, color=True):
+     flow_mask_img_file = os.path.join(hybrid_frame_path, f"flow_mask{frame_idx:09}.jpg")
+     if color:
+         # Normalize the reliable_flow array to the range [0, 255]
+         normalized_reliable_flow = (reliable_flow - reliable_flow.min()) / (reliable_flow.max() - reliable_flow.min()) * 255
+         # Change the data type to np.uint8
+         mask_image = normalized_reliable_flow.astype(np.uint8)
+     else:
+         # Extract the first channel of the reliable_flow array
+         first_channel = reliable_flow[..., 0]
+         # Normalize the first channel to the range [0, 255]
+         normalized_first_channel = (first_channel - first_channel.min()) / (first_channel.max() - first_channel.min()) * 255
+         # Change the data type to np.uint8
+         grayscale_image = normalized_first_channel.astype(np.uint8)
+         # Replicate the grayscale channel three times to form a BGR image
+         mask_image = np.stack((grayscale_image, grayscale_image, grayscale_image), axis=2)
+     cv2.imwrite(flow_mask_img_file, mask_image)
+     print(f"Saved flow mask visualization: {flow_mask_img_file}")
+
+ def reliable_flow_to_image(reliable_flow):
+     # Extract the first channel of the reliable_flow array
+     first_channel = reliable_flow[..., 0]
+     # Normalize the first channel to the range [0, 255]
+     normalized_first_channel = (first_channel - first_channel.min()) / (first_channel.max() - first_channel.min()) * 255
+     # Change the data type to np.uint8
+     grayscale_image = normalized_first_channel.astype(np.uint8)
+     # Replicate the grayscale channel three times to form a BGR image
+     bgr_image = np.stack((grayscale_image, grayscale_image, grayscale_image), axis=2)
+     return bgr_image
+
+ def draw_flow_lines_in_grid_in_color(img, flow, step=8, magnitude_multiplier=1, min_magnitude=0, max_magnitude=10000):
+     flow = flow * magnitude_multiplier
+     h, w = img.shape[:2]
+     y, x = np.mgrid[step/2:h:step, step/2:w:step].reshape(2, -1).astype(int)
+     fx, fy = flow[y, x].T
+     lines = np.vstack([x, y, x + fx, y + fy]).T.reshape(-1, 2, 2)
+     lines = np.int32(lines + 0.5)
+     vis = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+     vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)
+
+     # Color-code flow direction (hue) and magnitude (value) in HSV, then blend onto the frame
+     mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
+     hsv = np.zeros((flow.shape[0], flow.shape[1], 3), dtype=np.uint8)
+     hsv[..., 0] = ang * 180 / np.pi / 2
+     hsv[..., 1] = 255
+     hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
+     bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
+     vis = cv2.add(vis, bgr)
+
+     # Iterate through the sampled grid lines
+     for (x1, y1), (x2, y2) in lines:
+         # Calculate the magnitude of the line
+         magnitude = np.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
+
+         # Only draw the line if it falls within the magnitude range
+         if min_magnitude <= magnitude <= max_magnitude:
+             b = int(bgr[y1, x1, 0])
+             g = int(bgr[y1, x1, 1])
+             r = int(bgr[y1, x1, 2])
+             color = (b, g, r)
+             cv2.arrowedLine(vis, (x1, y1), (x2, y2), color, thickness=1, tipLength=0.1)
+     return vis
+
+ def draw_flow_lines_in_color(img, flow, threshold=3, magnitude_multiplier=1, min_magnitude=0, max_magnitude=10000):
+     vis = img.copy()  # Create a copy of the input image
+
+     # Find the locations where the flow magnitude exceeds the threshold
+     mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
+     idx = np.where(mag > threshold)
+
+     # Create HSV image
+     hsv = np.zeros((flow.shape[0], flow.shape[1], 3), dtype=np.uint8)
+     hsv[..., 0] = ang * 180 / np.pi / 2
+     hsv[..., 1] = 255
+     hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
+
+     # Convert HSV image to BGR
+     bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
+
+     # Add color from bgr
+     vis = cv2.add(vis, bgr)
+
+     # Draw arrows at a subset of these locations to indicate the direction of the flow
+     for i, (y, x) in enumerate(zip(idx[0], idx[1])):
+         # Calculate the endpoint and magnitude of the arrow
+         x2 = x + magnitude_multiplier * int(flow[y, x, 0])
+         y2 = y + magnitude_multiplier * int(flow[y, x, 1])
+         magnitude = np.sqrt((x2 - x) ** 2 + (y2 - y) ** 2)
+
+         # Only draw the arrow if it falls within the magnitude range
+         if min_magnitude <= magnitude <= max_magnitude:
+             # thin out the arrows at random so the visualization stays readable
+             if i % random.randint(100, 200) == 0:
+                 b = int(bgr[y, x, 0])
+                 g = int(bgr[y, x, 1])
+                 r = int(bgr[y, x, 2])
+                 color = (b, g, r)
+                 cv2.arrowedLine(vis, (x, y), (x2, y2), color, thickness=1, tipLength=0.25)
+
+     return vis
+
+ def autocontrast_grayscale(image, low_cutoff=0, high_cutoff=100):
+     # Perform autocontrast on a grayscale np array image.
+     # Find the low and high percentile values in the image
+     min_val = np.percentile(image, low_cutoff)
+     max_val = np.percentile(image, high_cutoff)
+
+     # Scale the image so that the low cutoff maps to 0 and the high cutoff maps to 255
+     image = 255 * (image - min_val) / (max_val - min_val)
+
+     # Clip values that fall outside the range [0, 255]
+     image = np.clip(image, 0, 255)
+
+     return image
+
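A brief usage sketch: clipping the darkest and brightest 5% of a grayscale array before rescaling (the percentile values and random input are illustrative):

import numpy as np

gray = np.random.randint(0, 256, (64, 64)).astype(np.float32)
stretched = autocontrast_grayscale(gray, low_cutoff=5, high_cutoff=95)
print(stretched.min(), stretched.max())  # 0.0 255.0 after clipping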
+ def get_resized_image_from_filename(im, dimensions):
+     img = cv2.imread(im)
+     return cv2.resize(img, (dimensions[0], dimensions[1]), interpolation=cv2.INTER_AREA)
+
+ def remap(img, flow):
+     border_mode = cv2.BORDER_REFLECT_101
+     h, w = img.shape[:2]
+     # pad the image by 25% on each side so the warp can pull from reflected borders
+     displacement = int(h * 0.25), int(w * 0.25)
+     larger_img = cv2.copyMakeBorder(img, displacement[0], displacement[0], displacement[1], displacement[1], border_mode)
+     lh, lw = larger_img.shape[:2]
+     larger_flow = extend_flow(flow, lw, lh)
+     remapped_img = cv2.remap(larger_img, larger_flow, None, cv2.INTER_LINEAR, borderMode=border_mode)
+     output_img = center_crop_image(remapped_img, w, h)
+     return output_img
+
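A quick property check for the padded remap pipeline (sketch with a synthetic image): an all-zero flow should reduce to the identity warp, so the round trip through image_transform_optical_flow returns the input unchanged.

import numpy as np

img = np.random.randint(0, 255, (32, 48, 3), dtype=np.uint8)
zero_flow = np.zeros((32, 48, 2), np.float32)
out = image_transform_optical_flow(img, zero_flow, flow_factor=1)
assert np.array_equal(out, img)  # zero flow is the identity warp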
+ def center_crop_image(img, w, h):
+     y, x, _ = img.shape
+     width_indent = int((x - w) / 2)
+     height_indent = int((y - h) / 2)
+     # slice by target size so an odd padding difference cannot leave an off-by-one
+     cropped_img = img[height_indent:height_indent + h, width_indent:width_indent + w]
+     return cropped_img
+
+ def extend_flow(flow, w, h):
+     # Get the shape of the original flow image
+     flow_h, flow_w = flow.shape[:2]
+     # Calculate the position of the image in the new image
+     x_offset = int((w - flow_w) / 2)
+     y_offset = int((h - flow_h) / 2)
+     # Generate the X and Y grids
+     x_grid, y_grid = np.meshgrid(np.arange(w), np.arange(h))
+     # Create the new flow image and set it to the X and Y grids (an identity mapping)
+     new_flow = np.dstack((x_grid, y_grid)).astype(np.float32)
+     # Shift the values of the original flow by the size of the border (note: in place, on the caller's array)
+     flow[:, :, 0] += x_offset
+     flow[:, :, 1] += y_offset
+     # Overwrite the middle of the grid with the original flow
+     new_flow[y_offset:y_offset + flow_h, x_offset:x_offset + flow_w, :] = flow
+     # Return the extended flow field
+     return new_flow
+
+ def abs_flow_to_rel_flow(flow, width, height):
+     fx, fy = flow[:, :, 0], flow[:, :, 1]
+     max_flow_x = np.max(np.abs(fx))
+     max_flow_y = np.max(np.abs(fy))
+     max_flow = max(max_flow_x, max_flow_y, 1e-8)  # guard against division by zero for all-zero flow
+
+     rel_fx = fx / (max_flow * width)
+     rel_fy = fy / (max_flow * height)
+     return np.dstack((rel_fx, rel_fy))
+
+ def rel_flow_to_abs_flow(rel_flow, width, height):
+     rel_fx, rel_fy = rel_flow[:, :, 0], rel_flow[:, :, 1]
+
+     max_flow_x = np.max(np.abs(rel_fx * width))
+     max_flow_y = np.max(np.abs(rel_fy * height))
+     max_flow = max(max_flow_x, max_flow_y)
+
+     fx = rel_fx * (max_flow * width)
+     fy = rel_fy * (max_flow * height)
+     return np.dstack((fx, fy))
extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/image_sharpening.py ADDED
@@ -0,0 +1,38 @@
+ # Copyright (C) 2023 Deforum LLC
+ #
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU Affero General Public License as published by
+ # the Free Software Foundation, version 3 of the License.
+ #
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ # GNU General Public License for more details.
+ #
+ # You should have received a copy of the GNU Affero General Public License
+ # along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+ # Contact the authors: https://deforum.github.io/
+
+ import cv2
+ import numpy as np
+
+ def unsharp_mask(img, kernel_size=(5, 5), sigma=1.0, amount=1.0, threshold=0, mask=None):
+     if amount == 0:
+         return img
+     # Return a sharpened version of the image, using an unsharp mask.
+     # If mask is not None, only the areas under the mask are sharpened.
+     blurred = cv2.GaussianBlur(img, kernel_size, sigma)
+     sharpened = float(amount + 1) * img - float(amount) * blurred
+     sharpened = np.maximum(sharpened, np.zeros(sharpened.shape))
+     sharpened = np.minimum(sharpened, 255 * np.ones(sharpened.shape))
+     sharpened = sharpened.round().astype(np.uint8)
+     if threshold > 0:
+         # compare in a signed dtype so uint8 subtraction cannot wrap around
+         low_contrast_mask = np.absolute(img.astype(np.int16) - blurred.astype(np.int16)) < threshold
+         np.copyto(sharpened, img, where=low_contrast_mask)
+     if mask is not None:
+         mask = np.array(mask)
+         masked_sharpened = cv2.bitwise_and(sharpened, sharpened, mask=mask)
+         masked_img = cv2.bitwise_and(img, img, mask=255 - mask)
+         sharpened = cv2.add(masked_img, masked_sharpened)
+     return sharpened
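A short usage sketch for the helper above (the file names are placeholders; amount > 0 strengthens the sharpening, while threshold suppresses it in low-contrast areas):

import cv2

img = cv2.imread("frame.png")  # placeholder path
sharp = unsharp_mask(img, kernel_size=(5, 5), sigma=1.0, amount=0.6, threshold=4)
cv2.imwrite("frame_sharp.png", sharp)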