diff --git a/.gitattributes b/.gitattributes index 28df5f900b358436f0267334b3e3e9af33f917ba..7d79dbc551ed92a45d1aa38599a69a7e77f2e2a9 100644 --- a/.gitattributes +++ b/.gitattributes @@ -53,3 +53,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text *.jpg filter=lfs diff=lfs merge=lfs -text *.jpeg filter=lfs diff=lfs merge=lfs -text *.webp filter=lfs diff=lfs merge=lfs -text +extensions-builtin/sd-webui-deforum/tests/testdata/example_init_vid.mp4 filter=lfs diff=lfs merge=lfs -text diff --git a/extensions-builtin/sd-webui-deforum/.github/FUNDING.yml b/extensions-builtin/sd-webui-deforum/.github/FUNDING.yml new file mode 100644 index 0000000000000000000000000000000000000000..e707ddb03c159bc5fc649349fe8303dbf43ddedc --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/.github/FUNDING.yml @@ -0,0 +1,13 @@ +# These are supported funding model platforms + +github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2] +patreon: deforum +open_collective: # Replace with a single Open Collective username +ko_fi: # Replace with a single Ko-fi username +tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel +community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry +liberapay: # Replace with a single Liberapay username +issuehunt: # Replace with a single IssueHunt username +otechie: # Replace with a single Otechie username +lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry +custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2'] diff --git a/extensions-builtin/sd-webui-deforum/.github/ISSUE_TEMPLATE/bug_report.yml b/extensions-builtin/sd-webui-deforum/.github/ISSUE_TEMPLATE/bug_report.yml new file mode 100644 index 0000000000000000000000000000000000000000..517bf43dc04532e64c09e5d96221f009f8706529 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/.github/ISSUE_TEMPLATE/bug_report.yml @@ -0,0 +1,105 @@ +name: Bug Report +description: Create a bug report for the Deforum extension +title: "[Bug]: " +labels: ["bug"] + +body: + - type: checkboxes + attributes: + label: Have you read the latest version of the FAQ? + description: Please visit the page called FAQ & Troubleshooting on the Deforum wiki in this repository and see if your problem has already been described there. + options: + - label: I have visited the FAQ page right now and my issue is not present there + required: true + - type: checkboxes + attributes: + label: Is there an existing issue for this? + description: Please search to see if an issue already exists for the bug you encountered (including the closed issues). + options: + - label: I have searched the existing issues and checked the recent builds/commits of both this extension and the webui + required: true + - type: checkboxes + attributes: + label: Are you using the latest version of the Deforum extension? + description: Please check if your Deforum is based on the latest repo commit (git log) or update it through the 'Extensions' tab, then check if the issue still persists. If it does, check this box. + options: + - label: I have Deforum updated to the latest version and I still have the issue. + required: true + - type: markdown + attributes: + value: | + *Please fill this form with as much information as possible; don't forget to fill in "What OS..."
and provide screenshots if possible* + - type: markdown + attributes: + value: | + **Forewarning:** *if you don't provide the full crash log, your issue will be discarded* + - type: textarea + id: what-did + attributes: + label: What happened? + description: Tell us what happened in a very clear and simple way + validations: + required: true + - type: textarea + id: steps + attributes: + label: Steps to reproduce the problem + description: Please provide us with precise step-by-step information on how to reproduce the bug + value: | + 1. Go to .... + 2. Press .... + 3. ... + validations: + required: true + - type: textarea + id: what-should + attributes: + label: What should have happened/how would you fix it? + description: Tell us what you think the normal behavior should be, or share any ideas on how to solve it + - type: textarea + id: what-torch + attributes: + label: Torch version + description: The Torch version your WebUI is running with. You can find it at the bottom of the page. + validations: + required: true + - type: dropdown + id: where + attributes: + label: On which platform are you launching the webui with the extension? + multiple: true + options: + - Local PC setup (Windows) + - Local PC setup (Linux) + - Local PC setup (Mac) + - Google Colab (The Last Ben's) + - Google Colab (Other) + - Cloud server (Linux) + - Other (please specify in "additional information") + - type: textarea + id: deforumsettings + attributes: + label: Deforum settings + description: Share a link to the settings file you used or to the latest one generated in the 'outputs/img2img-images/Deforum/' folder (ideally, upload it to GitHub Gists). + validations: + required: true + - type: textarea + id: customsettings + attributes: + label: Webui core settings + description: Share a link to the ui-config.json file in the core 'stable-diffusion-webui' folder. Note that if you have 'With img2img, do exactly the amount of steps the slider specified' checked, your issue will be discarded. + validations: + required: true + - type: textarea + id: logs + attributes: + label: Console logs + description: This is the most important part, and the one most users get wrong the first time! Please provide the **full** cmd/terminal logs from the moment you started the webui (i.e. clicked the launch file or started it from cmd) to the point when your bug happened. + render: Shell + validations: + required: true + - type: textarea + id: misc + attributes: + label: Additional information + description: Any relevant additional info or context. diff --git a/extensions-builtin/sd-webui-deforum/.github/ISSUE_TEMPLATE/config.yml b/extensions-builtin/sd-webui-deforum/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 0000000000000000000000000000000000000000..3fb606dd5a8462f37c60ab5227124928f6024945 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,8 @@ +blank_issues_enabled: false +contact_links: + - name: Deforum Github discussions + url: https://github.com/deforum-art/deforum-for-automatic1111-webui/discussions + about: Please ask and answer questions here.
If you want to complain about something, don't try to circumvent issue filing by starting a discussion here 🙃 + - name: Deforum Discord + url: https://discord.gg/deforum + about: Here is our main community where we chat, discuss development and share experiments and results diff --git a/extensions-builtin/sd-webui-deforum/.github/ISSUE_TEMPLATE/feature_request.yml b/extensions-builtin/sd-webui-deforum/.github/ISSUE_TEMPLATE/feature_request.yml new file mode 100644 index 0000000000000000000000000000000000000000..3f4bd7c3bc402d90610aed6053a8e52b1bf8f634 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/.github/ISSUE_TEMPLATE/feature_request.yml @@ -0,0 +1,46 @@ +name: Feature request +description: Suggest an idea for the Deforum extension +title: "[Feature Request]: " +labels: ["enhancement"] + +body: + - type: checkboxes + attributes: + label: Is there an existing issue for this? + description: Please search to see if an issue already exists for the feature you want, and check that it's not already implemented in a recent build/commit. + options: + - label: I have searched the existing issues and checked the recent builds/commits + required: true + - type: markdown + attributes: + value: | + *Please fill this form with as much information as possible; provide screenshots and/or illustrations of the feature if possible* + - type: textarea + id: feature + attributes: + label: What would your feature do? + description: Tell us about your feature in a very clear and simple way, and what problem it would solve + validations: + required: true + - type: textarea + id: workflow + attributes: + label: Proposed workflow + description: Please provide us with step-by-step information on how you'd like the feature to be accessed and used + value: | + 1. Go to .... + 2. Press .... + 3. ... + validations: + required: true + - type: textarea + id: misc + attributes: + label: Additional information + description: Add any other context or screenshots about the feature request here. + - type: textarea + attributes: + label: Are you going to help add it? + description: Do you want to participate in Deforum development and bring the desired feature sooner? Let us know if you are willing to add the desired feature yourself; ideally, leave your Discord handle here so we can contact you for a less formal conversation. Our community is welcoming and ready to provide you with any information on the project structure or how the code works. If not, keep in mind that you will have to wait until the team picks up your issue. + validations: + required: true diff --git a/extensions-builtin/sd-webui-deforum/.github/scripts/issue_checker.py b/extensions-builtin/sd-webui-deforum/.github/scripts/issue_checker.py new file mode 100644 index 0000000000000000000000000000000000000000..4939ac81e19c879eb3acbc07e2a4ce72652ffe6b --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/.github/scripts/issue_checker.py @@ -0,0 +1,126 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details.
+# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see <https://www.gnu.org/licenses/>. + +# Contact the authors: https://deforum.github.io/ + +import os +import re +from github import Github + +# Get GitHub token from environment variables +token = os.environ['GITHUB_TOKEN'] +g = Github(token) + +# Get the current repository +print(f"Repo is {os.environ['GITHUB_REPOSITORY']}") +repo = g.get_repo(os.environ['GITHUB_REPOSITORY']) + +# Get the issue number from the event payload +#issue_number = int(os.environ['ISSUE_NUMBER']) + +for issue in repo.get_issues(): + print(f"Processing issue №{issue.number}") + if issue.pull_request: + continue + + # Get the issue object + #issue = repo.get_issue(issue_number) + + # Define the keywords to search for in the issue + keywords = ['Python', 'Commit hash', 'Launching Web UI with arguments', 'Model loaded', 'deforum'] + + # Check if ALL of the keywords are present in the issue + def check_keywords(issue_body, keywords): + for keyword in keywords: + if not re.search(r'\b' + re.escape(keyword) + r'\b', issue_body, re.IGNORECASE): + return False + return True + + # Check if the issue title has at least a specified number of words + def check_title_word_count(issue_title, min_word_count): + words = issue_title.replace("/", " ").replace("\\\\", " ").split() + return len(words) >= min_word_count + + # Check if the issue title is concise + def check_title_concise(issue_title, max_word_count): + words = issue_title.replace("/", " ").replace("\\\\", " ").split() + return len(words) <= max_word_count + + # Check if the commit ID is in the correct hash form + def check_commit_id_format(issue_body): + match = re.search(r'webui commit id - ([a-fA-F0-9]+|\[[a-fA-F0-9]+\])', issue_body) + if not match: + print('webui_commit_id not found') + return False + webui_commit_id = match.group(1) + print(f'webui_commit_id {webui_commit_id}') + webui_commit_id = webui_commit_id.replace("[", "").replace("]", "") + if not (7 <= len(webui_commit_id) <= 40): + print('invalid length!') + return False + match = re.search(r'deforum exten commit id - ([a-fA-F0-9]+|\[[a-fA-F0-9]+\])', issue_body) + if not match: + print('deforum commit id not found') + return False + t2v_commit_id = match.group(1) + print(f'deforum_commit_id {t2v_commit_id}') + t2v_commit_id = t2v_commit_id.replace("[", "").replace("]", "") + if not (7 <= len(t2v_commit_id) <= 40): + print('invalid length!') + return False + return True + + # Only if a bug report + if '[Bug]' in issue.title and not '[Feature Request]' in issue.title: + print('The issue is eligible') + # Initialize an empty list to store error messages + error_messages = [] + + # Check for each condition and add the corresponding error message if the condition is not met + if not check_keywords(issue.body, keywords): + error_messages.append("Include **THE FULL LOG FROM THE START OF THE WEBUI** in the issue description.") + + if not check_title_word_count(issue.title, 3): + error_messages.append("Make sure the issue title has at least 3 words.") + + if not check_title_concise(issue.title, 13): + error_messages.append("The issue title should be concise and contain no more than 13 words.") + + # if not check_commit_id_format(issue.body): + # error_messages.append("Provide a valid commit ID in the format 'commit id - [commit_hash]' **both** for the WebUI and the Extension.") + + # If there are any error messages, close the issue and send a comment with the error messages + if error_messages: + print('Invalid issue, closing') +
# Add the "wrong format" label to the issue + wrong_format_label = repo.get_label("wrong format") + issue.add_to_labels(wrong_format_label) + + # Close the issue + issue.edit(state='closed') + + # Generate the comment by concatenating the error messages + comment = "This issue has been closed due to incorrect formatting. Please address the following mistakes and reopen the issue (click on the 'Reopen' button below):\n\n" + comment += "\n".join(f"- {error_message}" for error_message in error_messages) + + # Add the comment to the issue + issue.create_comment(comment) + elif repo.get_label("wrong format") in issue.labels: + print('Issue is fine') + issue.edit(state='open') + issue.delete_labels() + bug_label = repo.get_label("bug") + issue.add_to_labels(bug_label) + comment = "Thanks for addressing your formatting mistakes. The issue has been reopened now." + issue.create_comment(comment) diff --git a/extensions-builtin/sd-webui-deforum/.github/workflows/issue_checker.yaml b/extensions-builtin/sd-webui-deforum/.github/workflows/issue_checker.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8c39a05b024233380a87d9ba4a22458fae8724c4 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/.github/workflows/issue_checker.yaml @@ -0,0 +1,23 @@ +name: Issue Checker + +on: + issues: + types: [opened, reopened, edited] + +jobs: + check_issue: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v3 + - name: Set up Python + uses: actions/setup-python@v3 + with: + python-version: '3.x' + - name: Install dependencies + run: pip install PyGithub + - name: Check issue + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + ISSUE_NUMBER: ${{ github.event.issue.number }} + run: python .github/scripts/issue_checker.py diff --git a/extensions-builtin/sd-webui-deforum/.github/workflows/run_tests.yaml b/extensions-builtin/sd-webui-deforum/.github/workflows/run_tests.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f306c63f2969e1faee9d918b168c0328daaa3812 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/.github/workflows/run_tests.yaml @@ -0,0 +1,108 @@ +name: Tests + +on: + - push + - pull_request + +jobs: + test: + name: tests on CPU with empty model + runs-on: ubuntu-latest + if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name + steps: + - name: Checkout a1111 + uses: actions/checkout@v3 + with: + repository: AUTOMATIC1111/stable-diffusion-webui + ref: v1.6.0 + - name: Checkout Controlnet extension + uses: actions/checkout@v3 + with: + repository: Mikubill/sd-webui-controlnet + path: extensions/sd-webui-controlnet + - name: Checkout Deforum + uses: actions/checkout@v3 + with: + path: extensions/deforum + - name: Set up Python 3.10 + uses: actions/setup-python@v4 + with: + python-version: 3.10.6 + cache: pip + cache-dependency-path: | + **/requirements*txt + launch.py + - name: Install test dependencies + run: pip install wait-for-it -r extensions/deforum/requirements-dev.txt + env: + PIP_DISABLE_PIP_VERSION_CHECK: "1" + PIP_PROGRESS_BAR: "off" + - name: Setup environment + run: python launch.py --skip-torch-cuda-test --exit + env: + PIP_DISABLE_PIP_VERSION_CHECK: "1" + PIP_PROGRESS_BAR: "off" + TORCH_INDEX_URL: https://download.pytorch.org/whl/cpu + WEBUI_LAUNCH_LIVE_OUTPUT: "1" + PYTHONUNBUFFERED: "1" + - name: Start test server + run: > + python -m coverage run + --data-file=.coverage.server + launch.py + --skip-prepare-environment +
--skip-torch-cuda-test + --test-server + --do-not-download-clip + --no-half + --disable-opt-split-attention + --use-cpu all + --api-server-stop + --deforum-api + --api + 2>&1 | tee serverlog.txt & + - name: Run tests (with continue-on-error due to mysterious non-zero return code on success) + continue-on-error: true + id: runtests + run: | + wait-for-it --service 127.0.0.1:7860 -t 600 + cd extensions/deforum + python -m coverage run --data-file=.coverage.client -m pytest -vv --junitxml=tests/results.xml tests + - name: Check for test failures (necessary because of continue-on-error above) + id: testresults + uses: mavrosxristoforos/get-xml-info@1.1.0 + with: + xml-file: 'extensions/deforum/tests/results.xml' + xpath: '//testsuite/@failures' + - name: Fail if there were test failures + run: | + echo "Test failures: ${{ steps.testresults.outputs.info }}" + [ ${{ steps.testresults.outputs.info }} -eq 0 ] + - name: Kill test server + if: always() + run: curl -vv -XPOST http://127.0.0.1:7860/sdapi/v1/server-stop && sleep 10 + - name: Show coverage + run: | + python -m coverage combine .coverage* extensions/deforum/.coverage* + python -m coverage report -i + python -m coverage html -i + - name: Upload main app output + uses: actions/upload-artifact@v3 + if: always() + with: + name: serverlog + path: serverlog.txt + - name: Upload coverage HTML + uses: actions/upload-artifact@v3 + if: always() + with: + name: htmlcov + path: htmlcov + - name: Surface failing tests + if: always() + uses: pmeier/pytest-results-action@main + with: + path: extensions/deforum/tests/results.xml + summary: true + display-options: fEX + fail-on-empty: true diff --git a/extensions-builtin/sd-webui-deforum/.gitignore b/extensions-builtin/sd-webui-deforum/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..1b0eb546ed953b764ae400f9ad8f87c2f85f1afc --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/.gitignore @@ -0,0 +1,34 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see <https://www.gnu.org/licenses/>. + +# Contact the authors: https://deforum.github.io/ + +# Unnecessary compiled python files. +__pycache__ +*.pyc +*.pyo + +# Output Images +outputs + +# Log files for colab-convert +cc-outputs.log +*.safetensors +scripts/deforum_helpers/navigation.py + +# Test output +htmlcov +tests/results.xml +.coverage* +serverlog.txt diff --git a/extensions-builtin/sd-webui-deforum/CONTRIBUTING.md b/extensions-builtin/sd-webui-deforum/CONTRIBUTING.md new file mode 100644 index 0000000000000000000000000000000000000000..bef3e12e9c4b573a0bf417fee7b3ddf5dff0705c --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/CONTRIBUTING.md @@ -0,0 +1,5 @@ +# Contributing + +When contributing, please ping the devs via Discord (https://discord.gg/deforum) to make sure your addition will fit well into such a large project and to get help if needed.
+ +*By contributing to this project you agree that your work will be granted copyright to Deforum LLC and licensed under the terms of the GNU Affero General Public License version 3.* diff --git a/extensions-builtin/sd-webui-deforum/LICENSE b/extensions-builtin/sd-webui-deforum/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..8b33424baf521ea76c44f2a7b8658e786a1206f3 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/LICENSE @@ -0,0 +1,3228 @@ +Deforum extension: + +**Copyright (c) 2023 Deforum LLC** + +# GNU AFFERO GENERAL PUBLIC LICENSE + +Version 3, 19 November 2007 + +Copyright (C) 2007 Free Software Foundation, Inc. + + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + +## Preamble + +The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + +The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains +free software for all its users. + +When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + +Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + +A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + +The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + +An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing +under this license. + +The precise terms and conditions for copying, distribution and +modification follow. + +## TERMS AND CONDITIONS + +### 0. Definitions. + +"This License" refers to version 3 of the GNU Affero General Public +License. 
+ +"Copyright" also means copyright-like laws that apply to other kinds +of works, such as semiconductor masks. + +"The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + +To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of +an exact copy. The resulting work is called a "modified version" of +the earlier work or a work "based on" the earlier work. + +A "covered work" means either the unmodified Program or a work based +on the Program. + +To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + +To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user +through a computer network, with no transfer of a copy, is not +conveying. + +An interactive user interface displays "Appropriate Legal Notices" to +the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + +### 1. Source Code. + +The "source code" for a work means the preferred form of the work for +making modifications to it. "Object code" means any non-source form of +a work. + +A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + +The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + +The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. 
For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + +The Corresponding Source need not include anything that users can +regenerate automatically from other parts of the Corresponding Source. + +The Corresponding Source for a work in source code form is that same +work. + +### 2. Basic Permissions. + +All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + +You may make, run and propagate covered works that you do not convey, +without conditions so long as your license otherwise remains in force. +You may convey covered works to others for the sole purpose of having +them make modifications exclusively for you, or provide you with +facilities for running those works, provided that you comply with the +terms of this License in conveying all material for which you do not +control copyright. Those thus making or running the covered works for +you must do so exclusively on your behalf, under your direction and +control, on terms that prohibit them from making any copies of your +copyrighted material outside their relationship with you. + +Conveying under any other circumstances is permitted solely under the +conditions stated below. Sublicensing is not allowed; section 10 makes +it unnecessary. + +### 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + +No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + +When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such +circumvention is effected by exercising rights under this License with +respect to the covered work, and you disclaim any intention to limit +operation or modification of the work as a means of enforcing, against +the work's users, your or third parties' legal rights to forbid +circumvention of technological measures. + +### 4. Conveying Verbatim Copies. + +You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + +You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + +### 5. Conveying Modified Source Versions. 
+ +You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these +conditions: + +- a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. +- b) The work must carry prominent notices stating that it is + released under this License and any conditions added under + section 7. This requirement modifies the requirement in section 4 + to "keep intact all notices". +- c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. +- d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + +A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + +### 6. Conveying Non-Source Forms. + +You may convey a covered work in object code form under the terms of +sections 4 and 5, provided that you also convey the machine-readable +Corresponding Source under the terms of this License, in one of these +ways: + +- a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. +- b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the Corresponding + Source from a network server at no charge. +- c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. +- d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. 
You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. +- e) Convey the object code using peer-to-peer transmission, + provided you inform other peers where the object code and + Corresponding Source of the work are being offered to the general + public at no charge under subsection 6d. + +A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + +A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, +family, or household purposes, or (2) anything designed or sold for +incorporation into a dwelling. In determining whether a product is a +consumer product, doubtful cases shall be resolved in favor of +coverage. For a particular product received by a particular user, +"normally used" refers to a typical or common use of that class of +product, regardless of the status of the particular user or of the way +in which the particular user actually uses, or expects or is expected +to use, the product. A product is a consumer product regardless of +whether the product has substantial commercial, industrial or +non-consumer uses, unless such uses represent the only significant +mode of use of the product. + +"Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to +install and execute modified versions of a covered work in that User +Product from a modified version of its Corresponding Source. The +information must suffice to ensure that the continued functioning of +the modified object code is in no case prevented or interfered with +solely because modification has been made. + +If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + +The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or +updates for a work that has been modified or installed by the +recipient, or for the User Product in which it has been modified or +installed. Access to a network may be denied when the modification +itself materially and adversely affects the operation of the network +or violates the rules and protocols for communication across the network. 
+ +Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + +### 7. Additional Terms. + +"Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + +When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + +Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders +of that material) supplement the terms of this License with terms: + +- a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or +- b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or +- c) Prohibiting misrepresentation of the origin of that material, + or requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or +- d) Limiting the use for publicity purposes of names of licensors + or authors of the material; or +- e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or +- f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions + of it) with contractual assumptions of liability to the recipient, + for any liability that these contractual assumptions directly + impose on those licensors and authors. + +All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + +If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + +Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; the +above requirements apply either way. + +### 8. 
Termination. + +You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + +However, if you cease all violation of this License, then your license +from a particular copyright holder is reinstated (a) provisionally, +unless and until the copyright holder explicitly and finally +terminates your license, and (b) permanently, if the copyright holder +fails to notify you of the violation by some reasonable means prior to +60 days after the cessation. + +Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + +Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + +### 9. Acceptance Not Required for Having Copies. + +You are not required to accept this License in order to receive or run +a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + +### 10. Automatic Licensing of Downstream Recipients. + +Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + +An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + +You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + +### 11. Patents. + +A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. 
The +work thus licensed is called the contributor's "contributor version". + +A contributor's "essential patent claims" are all patent claims owned +or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + +Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + +In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + +If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + +If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + +A patent license is "discriminatory" if it does not include within the +scope of its coverage, prohibits the exercise of, or is conditioned on +the non-exercise of one or more of the rights that are specifically +granted under this License. 
You may not convey a covered work if you +are a party to an arrangement with a third party that is in the +business of distributing software, under which you make payment to the +third party based on the extent of your activity of conveying the +work, and under which the third party grants, to any of the parties +who would receive the covered work from you, a discriminatory patent +license (a) in connection with copies of the covered work conveyed by +you (or copies made from those copies), or (b) primarily for and in +connection with specific products or compilations that contain the +covered work, unless you entered into that arrangement, or that patent +license was granted, prior to 28 March 2007. + +Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + +### 12. No Surrender of Others' Freedom. + +If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under +this License and any other pertinent obligations, then as a +consequence you may not convey it at all. For example, if you agree to +terms that obligate you to collect a royalty for further conveying +from those to whom you convey the Program, the only way you could +satisfy both those terms and this License would be to refrain entirely +from conveying the Program. + +### 13. Remote Network Interaction; Use with the GNU General Public License. + +Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your +version supports such interaction) an opportunity to receive the +Corresponding Source of your version by providing access to the +Corresponding Source from a network server at no charge, through some +standard or customary means of facilitating copying of software. This +Corresponding Source shall include the Corresponding Source for any +work covered by version 3 of the GNU General Public License that is +incorporated pursuant to the following paragraph. + +Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + +### 14. Revised Versions of this License. + +The Free Software Foundation may publish revised and/or new versions +of the GNU Affero General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. 
If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever +published by the Free Software Foundation. + +If the Program specifies that a proxy can decide which future versions +of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + +Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + +### 15. Disclaimer of Warranty. + +THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT +WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND +PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE +DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR +CORRECTION. + +### 16. Limitation of Liability. + +IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR +CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES +ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT +NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR +LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM +TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER +PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +### 17. Interpretation of Sections 15 and 16. + +If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + +END OF TERMS AND CONDITIONS + +## How to Apply These Terms to Your New Programs + +If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these +terms. + +To do so, attach the following notices to the program. It is safest to +attach them to the start of each source file to most effectively state +the exclusion of warranty; and each file should have at least the +"copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as + published by the Free Software Foundation, either version 3 of the + License, or (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see . 
+ +Also add information on how to contact you by electronic and paper +mail. + +If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for +the specific requirements. + +You should also get your employer (if you work as a programmer) or +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. For more information on this, and how to apply and follow +the GNU AGPL, see . + +deforum-stable-diffusion: +MIT License + +Copyright (c) 2022 deforum and contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +k-diffusion: +MIT License + +Copyright (c) 2022 Katherine Crowson + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+ +clip: +MIT License + +Copyright (c) 2021 OpenAI + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +MiDaS: +MIT License + +Copyright (c) 2019 Intel ISL (Intel Intelligent Systems Lab) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +ZoeDepth: +MIT License + +Copyright (c) 2022 Intelligent Systems Lab Org + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +pytorch3d-lite: +BSD License + +For PyTorch3D software + +Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + * Neither the name Meta nor the names of its contributors may be used to + endorse or promote products derived from this software without specific + prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +taming-transformers: +Copyright (c) 2020 Patrick Esser and Robin Rombach and Björn Ommer + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE +OR OTHER DEALINGS IN THE SOFTWARE./ + +stable diffusion: +Copyright (c) 2022 Robin Rombach and Patrick Esser and contributors + +CreativeML Open RAIL-M +dated August 22, 2022 + +Section I: PREAMBLE + +Multimodal generative models are being widely adopted and used, and have the potential to transform the way artists, among other individuals, conceive and benefit from AI or ML technologies as a tool for content creation. + +Notwithstanding the current and potential benefits that these artifacts can bring to society at large, there are also concerns about potential misuses of them, either due to their technical limitations or ethical considerations. + +In short, this license strives for both the open and responsible downstream use of the accompanying model. When it comes to the open character, we took inspiration from open source permissive licenses regarding the grant of IP rights. 
Referring to the downstream responsible use, we added use-based restrictions not permitting the use of the Model in very specific scenarios, in order for the licensor to be able to enforce the license in case potential misuses of the Model may occur. At the same time, we strive to promote open and responsible research on generative models for art and content generation. + +Even though downstream derivative versions of the model could be released under different licensing terms, the latter will always have to include - at minimum - the same use-based restrictions as the ones in the original license (this license). We believe in the intersection between open and responsible AI development; thus, this License aims to strike a balance between both in order to enable responsible open-science in the field of AI. + +This License governs the use of the model (and its derivatives) and is informed by the model card associated with the model. + +NOW THEREFORE, You and Licensor agree as follows: + +1. Definitions + +- "License" means the terms and conditions for use, reproduction, and Distribution as defined in this document. +- "Data" means a collection of information and/or content extracted from the dataset used with the Model, including to train, pretrain, or otherwise evaluate the Model. The Data is not licensed under this License. +- "Output" means the results of operating a Model as embodied in informational content resulting therefrom. +- "Model" means any accompanying machine-learning based assemblies (including checkpoints), consisting of learnt weights, parameters (including optimizer states), corresponding to the model architecture as embodied in the Complementary Material, that have been trained or tuned, in whole or in part on the Data, using the Complementary Material. +- "Derivatives of the Model" means all modifications to the Model, works based on the Model, or any other model which is created or initialized by transfer of patterns of the weights, parameters, activations or output of the Model, to the other model, in order to cause the other model to perform similarly to the Model, including - but not limited to - distillation methods entailing the use of intermediate data representations or methods based on the generation of synthetic data by the Model for training the other model. +- "Complementary Material" means the accompanying source code and scripts used to define, run, load, benchmark or evaluate the Model, and used to prepare data for training or evaluation, if any. This includes any accompanying documentation, tutorials, examples, etc, if any. +- "Distribution" means any transmission, reproduction, publication or other sharing of the Model or Derivatives of the Model to a third party, including providing the Model as a hosted service made available by electronic or other remote means - e.g. API-based or web access. +- "Licensor" means the copyright owner or entity authorized by the copyright owner that is granting the License, including the persons or entities that may have rights in the Model and/or distributing the Model. +- "You" (or "Your") means an individual or Legal Entity exercising permissions granted by this License and/or making use of the Model for whichever purpose and in any field of use, including usage of the Model in an end-use application - e.g. chatbot, translator, image generator. +- "Third Parties" means individuals or legal entities that are not under common control with Licensor or You. 
+- "Contribution" means any work of authorship, including the original version of the Model and any modifications or additions to that Model or Derivatives of the Model thereof, that is intentionally submitted to Licensor for inclusion in the Model by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Model, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." +- "Contributor" means Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Model. + +Section II: INTELLECTUAL PROPERTY RIGHTS + +Both copyright and patent grants apply to the Model, Derivatives of the Model and Complementary Material. The Model and Derivatives of the Model are subject to additional terms as described in Section III. + +2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare, publicly display, publicly perform, sublicense, and distribute the Complementary Material, the Model, and Derivatives of the Model. +3. Grant of Patent License. Subject to the terms and conditions of this License and where and as applicable, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this paragraph) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Model and the Complementary Material, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Model to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Model and/or Complementary Material or a Contribution incorporated within the Model and/or Complementary Material constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for the Model and/or Work shall terminate as of the date such litigation is asserted or filed. + +Section III: CONDITIONS OF USAGE, DISTRIBUTION AND REDISTRIBUTION + +4. Distribution and Redistribution. You may host for Third Party remote access purposes (e.g. software-as-a-service), reproduce and distribute copies of the Model or Derivatives of the Model thereof in any medium, with or without modifications, provided that You meet the following conditions: +Use-based restrictions as referenced in paragraph 5 MUST be included as an enforceable provision by You in any type of legal agreement (e.g. a license) governing the use and/or distribution of the Model or Derivatives of the Model, and You shall give notice to subsequent users You Distribute to, that the Model or Derivatives of the Model are subject to paragraph 5. 
This provision does not apply to the use of Complementary Material. +You must give any Third Party recipients of the Model or Derivatives of the Model a copy of this License; +You must cause any modified files to carry prominent notices stating that You changed the files; +You must retain all copyright, patent, trademark, and attribution notices excluding those notices that do not pertain to any part of the Model, Derivatives of the Model. +You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions - respecting paragraph 4.a. - for use, reproduction, or Distribution of Your modifications, or for any such Derivatives of the Model as a whole, provided Your use, reproduction, and Distribution of the Model otherwise complies with the conditions stated in this License. +5. Use-based restrictions. The restrictions set forth in Attachment A are considered Use-based restrictions. Therefore You cannot use the Model and the Derivatives of the Model for the specified restricted uses. You may use the Model subject to this License, including only for lawful purposes and in accordance with the License. Use may include creating any content with, finetuning, updating, running, training, evaluating and/or reparametrizing the Model. You shall require all of Your users who use the Model or a Derivative of the Model to comply with the terms of this paragraph (paragraph 5). +6. The Output You Generate. Except as set forth herein, Licensor claims no rights in the Output You generate using the Model. You are accountable for the Output you generate and its subsequent uses. No use of the output can contravene any provision as stated in the License. + +Section IV: OTHER PROVISIONS + +7. Updates and Runtime Restrictions. To the maximum extent permitted by law, Licensor reserves the right to restrict (remotely or otherwise) usage of the Model in violation of this License, update the Model through electronic means, or modify the Output of the Model based on updates. You shall undertake reasonable efforts to use the latest version of the Model. +8. Trademarks and related. Nothing in this License permits You to make use of Licensors’ trademarks, trade names, logos or to otherwise suggest endorsement or misrepresent the relationship between the parties; and any rights not expressly granted herein are reserved by the Licensors. +9. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Model and the Complementary Material (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Model, Derivatives of the Model, and the Complementary Material and assume any risks associated with Your exercise of permissions under this License. +10. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Model and the Complementary Material (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. +11. Accepting Warranty or Additional Liability. While redistributing the Model, Derivatives of the Model and the Complementary Material thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. +12. If any provision of this License is held to be invalid, illegal or unenforceable, the remaining provisions shall be unaffected thereby and remain valid as if such provision had not been set forth herein. + +END OF TERMS AND CONDITIONS + + + + +Attachment A + +Use Restrictions + +You agree not to use the Model or Derivatives of the Model: +- In any way that violates any applicable national, federal, state, local or international law or regulation; +- For the purpose of exploiting, harming or attempting to exploit or harm minors in any way; +- To generate or disseminate verifiably false information and/or content with the purpose of harming others; +- To generate or disseminate personal identifiable information that can be used to harm an individual; +- To defame, disparage or otherwise harass others; +- For fully automated decision making that adversely impacts an individual’s legal rights or otherwise creates or modifies a binding, enforceable obligation; +- For any use intended to or which has the effect of discriminating against or harming individuals or groups based on online or offline social behavior or known or predicted personal or personality characteristics; +- To exploit any of the vulnerabilities of a specific group of persons based on their age, social, physical or mental characteristics, in order to materially distort the behavior of a person pertaining to that group in a manner that causes or is likely to cause that person or another person physical or psychological harm; +- For any use intended to or which has the effect of discriminating against individuals or groups based on legally protected characteristics or categories; +- To provide medical advice and medical results interpretation; +- To generate or disseminate information for the purpose to be used for administration of justice, law enforcement, immigration or asylum processes, such as predicting an individual will commit fraud/crime commitment (e.g. by text profiling, drawing causal relationships between assertions made in documents, indiscriminate and arbitrarily-targeted use). + +transformers: +Copyright 2018- The Hugging Face team. 
All rights reserved. + +transformers, FILM Interpolation: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +adabins: + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. 
+ + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. 
A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
+
+adabins:
+                    GNU GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+  The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. 
+ + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. 
This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. 
+ + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. 
+ + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. 
+ + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. 
If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. 
Interpretation of Sections 15 and 16.
+
+  If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
+
+Clipseg:
+MIT License
+
+This license does not apply to the model weights.
+ +RIFE: +MIT License + +Copyright (c) 2021 hzwer + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +gifski: +GNU AFFERO GENERAL PUBLIC LICENSE ++ +pngquant.c + +### pngquant.c +© 1989, 1991 by Jef Poskanzer. + +Permission to use, copy, modify, and distribute this software and its documentation for any purpose and without fee is hereby granted, provided that the above copyright notice appear in all copies and that both that copyright notice and this permission notice appear in supporting documentation. This software is provided "as is" without express or implied warranty. + +© 1997-2002 by Greg Roelofs; based on an idea by Stefan Schneider. + +All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. +Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +### GNU AFFERO GENERAL PUBLIC LICENSE + +Version 3, 19 November 2007 + +© 2007 Free Software Foundation, Inc. + + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + +### Preamble + +The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + +The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. 
By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains +free software for all its users. + +When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + +Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + +A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + +The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + +An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing +under this license. + +The precise terms and conditions for copying, distribution and +modification follow. + +### TERMS AND CONDITIONS + +#### 0. Definitions. + +"This License" refers to version 3 of the GNU Affero General Public +License. + +"Copyright" also means copyright-like laws that apply to other kinds +of works, such as semiconductor masks. + +"The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + +To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of +an exact copy. The resulting work is called a "modified version" of +the earlier work or a work "based on" the earlier work. + +A "covered work" means either the unmodified Program or a work based +on the Program. + +To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + +To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. 
Mere interaction with a user +through a computer network, with no transfer of a copy, is not +conveying. + +An interactive user interface displays "Appropriate Legal Notices" to +the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + +#### 1. Source Code. + +The "source code" for a work means the preferred form of the work for +making modifications to it. "Object code" means any non-source form of +a work. + +A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + +The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + +The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + +The Corresponding Source need not include anything that users can +regenerate automatically from other parts of the Corresponding Source. + +The Corresponding Source for a work in source code form is that same +work. + +#### 2. Basic Permissions. + +All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + +You may make, run and propagate covered works that you do not convey, +without conditions so long as your license otherwise remains in force. 
+You may convey covered works to others for the sole purpose of having +them make modifications exclusively for you, or provide you with +facilities for running those works, provided that you comply with the +terms of this License in conveying all material for which you do not +control copyright. Those thus making or running the covered works for +you must do so exclusively on your behalf, under your direction and +control, on terms that prohibit them from making any copies of your +copyrighted material outside their relationship with you. + +Conveying under any other circumstances is permitted solely under the +conditions stated below. Sublicensing is not allowed; section 10 makes +it unnecessary. + +#### 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + +No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + +When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such +circumvention is effected by exercising rights under this License with +respect to the covered work, and you disclaim any intention to limit +operation or modification of the work as a means of enforcing, against +the work's users, your or third parties' legal rights to forbid +circumvention of technological measures. + +#### 4. Conveying Verbatim Copies. + +You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + +You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + +#### 5. Conveying Modified Source Versions. + +You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these +conditions: + +- a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. +- b) The work must carry prominent notices stating that it is + released under this License and any conditions added under + section 7. This requirement modifies the requirement in section 4 + to "keep intact all notices". +- c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. +- d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ +A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + +#### 6. Conveying Non-Source Forms. + +You may convey a covered work in object code form under the terms of +sections 4 and 5, provided that you also convey the machine-readable +Corresponding Source under the terms of this License, in one of these +ways: + +- a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. +- b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the Corresponding + Source from a network server at no charge. +- c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. +- d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. +- e) Convey the object code using peer-to-peer transmission, + provided you inform other peers where the object code and + Corresponding Source of the work are being offered to the general + public at no charge under subsection 6d. + +A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + +A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, +family, or household purposes, or (2) anything designed or sold for +incorporation into a dwelling. 
In determining whether a product is a +consumer product, doubtful cases shall be resolved in favor of +coverage. For a particular product received by a particular user, +"normally used" refers to a typical or common use of that class of +product, regardless of the status of the particular user or of the way +in which the particular user actually uses, or expects or is expected +to use, the product. A product is a consumer product regardless of +whether the product has substantial commercial, industrial or +non-consumer uses, unless such uses represent the only significant +mode of use of the product. + +"Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to +install and execute modified versions of a covered work in that User +Product from a modified version of its Corresponding Source. The +information must suffice to ensure that the continued functioning of +the modified object code is in no case prevented or interfered with +solely because modification has been made. + +If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + +The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or +updates for a work that has been modified or installed by the +recipient, or for the User Product in which it has been modified or +installed. Access to a network may be denied when the modification +itself materially and adversely affects the operation of the network +or violates the rules and protocols for communication across the +network. + +Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + +#### 7. Additional Terms. + +"Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + +When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ +Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders +of that material) supplement the terms of this License with terms: + +- a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or +- b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or +- c) Prohibiting misrepresentation of the origin of that material, + or requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or +- d) Limiting the use for publicity purposes of names of licensors + or authors of the material; or +- e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or +- f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions + of it) with contractual assumptions of liability to the recipient, + for any liability that these contractual assumptions directly + impose on those licensors and authors. + +All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + +If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + +Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; the +above requirements apply either way. + +#### 8. Termination. + +You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + +However, if you cease all violation of this License, then your license +from a particular copyright holder is reinstated (a) provisionally, +unless and until the copyright holder explicitly and finally +terminates your license, and (b) permanently, if the copyright holder +fails to notify you of the violation by some reasonable means prior to +60 days after the cessation. + +Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + +Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + +#### 9. Acceptance Not Required for Having Copies. + +You are not required to accept this License in order to receive or run +a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + +#### 10. Automatic Licensing of Downstream Recipients. + +Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + +An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + +You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + +#### 11. Patents. + +A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + +A contributor's "essential patent claims" are all patent claims owned +or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + +Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + +In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + +If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + +If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + +A patent license is "discriminatory" if it does not include within the +scope of its coverage, prohibits the exercise of, or is conditioned on +the non-exercise of one or more of the rights that are specifically +granted under this License. You may not convey a covered work if you +are a party to an arrangement with a third party that is in the +business of distributing software, under which you make payment to the +third party based on the extent of your activity of conveying the +work, and under which the third party grants, to any of the parties +who would receive the covered work from you, a discriminatory patent +license (a) in connection with copies of the covered work conveyed by +you (or copies made from those copies), or (b) primarily for and in +connection with specific products or compilations that contain the +covered work, unless you entered into that arrangement, or that patent +license was granted, prior to 28 March 2007. + +Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + +#### 12. No Surrender of Others' Freedom. + +If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under +this License and any other pertinent obligations, then as a +consequence you may not convey it at all. For example, if you agree to +terms that obligate you to collect a royalty for further conveying +from those to whom you convey the Program, the only way you could +satisfy both those terms and this License would be to refrain entirely +from conveying the Program. + +#### 13. Remote Network Interaction; Use with the GNU General Public License. 
+ +Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your +version supports such interaction) an opportunity to receive the +Corresponding Source of your version by providing access to the +Corresponding Source from a network server at no charge, through some +standard or customary means of facilitating copying of software. This +Corresponding Source shall include the Corresponding Source for any +work covered by version 3 of the GNU General Public License that is +incorporated pursuant to the following paragraph. + +Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + +#### 14. Revised Versions of this License. + +The Free Software Foundation may publish revised and/or new versions +of the GNU Affero General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever +published by the Free Software Foundation. + +If the Program specifies that a proxy can decide which future versions +of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + +Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + +#### 15. Disclaimer of Warranty. + +THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT +WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND +PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE +DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR +CORRECTION. + +#### 16. Limitation of Liability. 
+ +IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR +CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES +ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT +NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR +LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM +TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER +PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +#### 17. Interpretation of Sections 15 and 16. + +If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + +REALESRGAN/ REALESRGAN-NCNN-VULKAN: +MIT License (MIT) + +Copyright (c) 2021 Xintao Wang + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +------------------------ +The following is the License of realsr-ncnn-vulkan + +The MIT License (MIT) + +Copyright (c) 2019 nihui + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ + +Disco Diffusion: +MIT License + +Copyright (c) 2021 Maxwell Ingham +Copyright (c) 2022 Adam Letts +Copyright (c) 2022 Alex Spirin + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + + +LeReS Depth Estimation: +Adobe Research License Terms + +1. You may use, reproduce, modify, and display the research materials provided under this license (the “Research +Materials”) solely for noncommercial purposes. Noncommercial purposes include academic research, teaching, and +testing, but do not include commercial licensing or distribution, development of commercial products, or any other +activity which results in commercial gain. You may not redistribute the Research Materials. + +2. You agree to (a) comply with all laws and regulations applicable to your use of the Research Materials under this license, +including but not limited to any import or export laws; (b) preserve any copyright or other notices from the Research +Materials; and (c) for any Research Materials in object code, not attempt to modify, reverse engineer, or decompile +such Research Materials except as permitted by applicable law. + +3. THE RESEARCH MATERIALS ARE PROVIDED “AS IS,” WITHOUT WARRANTY OF ANY KIND, AND YOU ASSUME ALL RISKS +ASSOCIATED WITH THEIR USE. IN NO EVENT WILL ANYONE BE LIABLE TO YOU FOR ANY ACTUAL, INCIDENTAL, SPECIAL, +OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION WITH USE OF THE RESEARCH MATERIALS. \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/README.md b/extensions-builtin/sd-webui-deforum/README.md new file mode 100644 index 0000000000000000000000000000000000000000..c250b2009be1e49c9e274ee35789ac7b26168a69 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/README.md @@ -0,0 +1,73 @@ + +# Deforum Stable Diffusion — official extension for AUTOMATIC1111's webui + +

+ Last Commit + GitHub issues + GitHub stars + GitHub forks + +

+
+## Need help? See our [FAQ](https://github.com/deforum-art/sd-webui-deforum/wiki/FAQ-&-Troubleshooting)
+
+## Getting Started
+
+1. Install [AUTOMATIC1111's webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui/).
+
+2. There are two ways to install the extension. Either clone the repo into the `extensions` directory via the git command line launched within the `stable-diffusion-webui` folder:
+
+```sh
+git clone https://github.com/deforum-art/sd-webui-deforum extensions/deforum
+```
+
+Or download this repository, locate the `extensions` folder within your WebUI installation, create a folder named `deforum` and put the contents of the downloaded directory inside of it. Then restart WebUI.
+
+3. Open the webui and find the Deforum tab at the top of the page.
+
+4. Enter the animation settings. Refer to [this general guide](https://docs.google.com/document/d/1pEobUknMFMkn8F5TMsv8qRzamXX_75BShMMXV8IFslI/edit) and [this guide to math keyframing functions in Deforum](https://docs.google.com/document/d/1pfW1PwbDIuW0cv-dnuyYj1UzPqe23BlSLTJsqazffXM/edit?usp=sharing). However, **in this version prompt weights less than zero don't work like they do in the original Deforum!** Split the positive and the negative prompt in the json section using the --neg argument, like this: "apple:\`where(cos(t)>=0, cos(t), 0)\`, snow --neg strawberry:\`where(cos(t)<0, -cos(t), 0)\`"
+
+5. To view animation frames as they're being made, without waiting for the completion of an animation, go to the 'Settings' tab and set the value of this setting **above zero**. Warning: it may slow down the generation process.
+
+![adsdasunknown](https://user-images.githubusercontent.com/14872007/196064311-1b79866a-e55b-438a-84a7-004ff30829ad.png)
+
+
+6. Run the script and check that it is working and producing output. **In 3D mode a large delay is expected at first** as the script loads the depth models. Using the default settings, the whole thing should consume 6.4 GB of VRAM at 3D mode peaks, and no more than 3.8 GB of VRAM in 3D mode if you launch the webui with the '--lowvram' command line argument.
+
+7. After the generation process is completed, click the button with the self-describing name to show the video or gif result right in the GUI!
+
+8. Join our Discord where you can post generated stuff, ask questions and more: https://discord.gg/deforum.
+
+* There's also the 'Issues' tab in the repo, for, well... reporting issues ;)
+
+9. Profit!
+
+## Known issues
+
+* This port is not fully backward-compatible with either the notebook or the local version, both because of changes in how AUTOMATIC1111's webui handles Stable Diffusion models and because of changes made to this script to get it running in the new environment. *Expect* that older settings may not give exactly the same result, or may break things entirely.
+
+## Screenshots
+
+Amazing raw Deforum animation by [Pxl.Pshr](https://www.instagram.com/pxl.pshr):
+* Turn Audio ON!
+
+(Audio credits: SKRILLEX, FRED AGAIN & FLOWDAN - RUMBLE (PHACE'S DNB FLIP))
+
+https://user-images.githubusercontent.com/121192995/224450647-39529b28-be04-4871-bb7a-faf7afda2ef2.mp4
+
+Setting file of that video: [here](https://github.com/deforum-art/sd-webui-deforum/files/11353167/PxlPshrWinningAnimationSettings.txt).
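+
+For reference, the `prompts` block inside a Deforum settings file (like the one linked above, or the bundled `scripts/default_settings.txt`) is plain JSON keyed by frame number, with negatives split off by `--neg` as described in step 4. A minimal illustrative sketch follows; the frame numbers and prompt text are examples, not values taken from the linked file:
+
+```json
+{
+  "prompts": {
+    "0": "apple:`where(cos(t)>=0, cos(t), 0)`, snow --neg strawberry:`where(cos(t)<0, -cos(t), 0)`",
+    "60": "a beautiful coconut --neg photo, realistic"
+  },
+  "positive_prompts": "",
+  "negative_prompts": "nsfw, nude"
+}
+```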
+ +Main extension tab: + +![image](https://user-images.githubusercontent.com/121192995/226101131-43bf594a-3152-45dd-a5d1-2538d0bc221d.png) + +Keyframes tab: + +![image](https://user-images.githubusercontent.com/121192995/226101140-bfe6cce7-9b78-4a1d-be9a-43e1fc78239e.png) + +## License + +This program is distributed under the terms of the GNU Affero Public License v3.0, copyright (c) 2023 Deforum LLC. + +Some of its sublicensed integrated 3rd party components may have other licenses, see LICENSE for usage terms. diff --git a/extensions-builtin/sd-webui-deforum/install.py b/extensions-builtin/sd-webui-deforum/install.py new file mode 100644 index 0000000000000000000000000000000000000000..e4629b4d0f708cb5c4190784c848277544adb80d --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/install.py @@ -0,0 +1,26 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +# Contact the authors: https://deforum.github.io/ + +import launch +import os + +req_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), "requirements.txt") + +with open(req_file) as file: + for lib in file: + lib = lib.strip() + if not launch.is_installed(lib): + launch.run_pip(f"install {lib}", f"Deforum requirement: {lib}") \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/javascript/deforum-hints.js b/extensions-builtin/sd-webui-deforum/javascript/deforum-hints.js new file mode 100644 index 0000000000000000000000000000000000000000..7112b3ef119ac29171e35589bab40bbe441aa0d5 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/javascript/deforum-hints.js @@ -0,0 +1,232 @@ +/* +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +Contact the authors: https://deforum.github.io/ +*/ + +// mouseover tooltips for various UI elements + +deforum_titles = { + //Run + "Override settings": "specify a custom settings file and ignore settings displayed in the interface", + "Custom settings file": "the path to a custom settings file", + "Width": "The width of the output images, in pixels (must be a multiple of 64)", + "Height": "The height of the output images, in pixels (must be a multiple of 64)", + "Restore faces": "Restore low quality faces using GFPGAN neural network", + "Tiling": "Produce an image that can be tiled.", + "Highres. 
fix": "Use a two step process to partially create an image at smaller resolution, upscale, and then improve details in it without changing composition", + "Seed": "A value that determines the output of random number generator - if you create an image with same parameters and seed as another image, you'll get the same result", + "Sampler": "Which algorithm to use to produce the image", + "Enable extras": "enable additional seed settings", + "Subseed": "Seed of a different picture to be mixed into the generation.", + "Subseed strength": "How strong of a variation to produce. At 0, there will be no effect. At 1, you will get the complete picture with variation seed (except for ancestral samplers, where you will just get something).", + "Resize seed from width": "Normally, changing the resolution will completely change an image, even when using the same seed. If you generated an image with a particular seed and then changed the resolution, put the original resolution here to get an image that more closely resemles the original", + "Resize seed from height": "Normally, changing the resolution will completely change an image, even when using the same seed. If you generated an image with a particular seed and then changed the resolution, put the original resolution here to get an image that more closely resemles the original", + "Steps": "How many times to improve the generated image iteratively; higher values take longer; very low values can produce bad results", + "Batch name": "output images will be placed in a folder with this name ({timestring} token will be replaced) inside the img2img output folder. Supports placeholders like {seed}, {w}, {h}, {prompts} and more", + "Pix2Pix img CFG schedule": "*Only in use with pix2pix checkpoints!*", + "Filename format": "specify the format of the filename for output images", + "Seed behavior": "defines the seed behavior that is used for animations", + "iter": "the seed value will increment by 1 for each subsequent frame of the animation", + "fixed": "the seed will remain fixed across all frames of animation. **NOT RECOMMENDED.** Unless you know what you are doing, it will *deep fry* the pictures over time", + "random": "a random seed will be used on each frame of the animation", + "schedule": "specify your own seed schedule", + "Seed iter N":"controls for how many frames the same seed should stick before iterating to the next one", + //Keyframes + "Animation mode": "selects the type of animation", + "2D": "only 2D motion parameters will be used, but this mode uses the least amount of VRAM. You can optionally enable flip_2d_perspective to enable some psuedo-3d animation parameters while in 2D mode.", + "3D": "enables all 3D motion parameters.", + "Video Input": "will ignore all motion parameters and attempt to reference a video loaded into the runtime, specified by the video_init_path. Max_frames is ignored during video_input mode, and instead, follows the number of frames pulled from the video’s length. Resume_from_timestring is NOT available with Video_Input mode.", + "Max frames": "the maximum number of output images to be created", + "Border": "controls handling method of pixels to be generated when the image is smaller than the frame.", + "wrap": "pulls pixels from the opposite edge of the image", + "replicate": "repeats the edge of the pixels, and extends them. 
Animations with quick motion may yield lines where this border function was attempting to populate pixels into the empty space created.", + "Zoom": "2D operator that scales the canvas size, multiplicatively. [static = 1.0]", + "Angle": "2D operator to rotate canvas clockwise/anticlockwise in degrees per frame", + "Transform Center X": "x center axis for 2D angle/zoom *only*", + "Transform Center Y": "y center axis for 2D angle/zoom *only*", + "Translation X": "2D & 3D operator to move canvas left/right in pixels per frame", + "Translation Y": "2D & 3D operator to move canvas up/down in pixels per frame", + "Translation Z": "3D operator to move canvas towards/away from view [speed set by FOV]", + "Rotation 3D X": "3D operator to tilt canvas up/down in degrees per frame", + "Rotation 3D Y": "3D operator to pan canvas left/right in degrees per frame", + "Rotation 3D Z": "3D operator to roll canvas clockwise/anticlockwise", + "Enable perspective flip": "enables 2D mode functions to simulate faux 3D movement", + "Perspective flip theta": "the roll effect angle", + "Perspective flip phi": "the tilt effect angle", + "Perspective flip gamma": "the pan effect angle", + "Perspective flip fv": "the 2D vanishing point of perspective (recommended range 30-160)", + "Noise schedule": "amount of graininess to add per frame for diffusion diversity", + "Strength schedule": "amount of presence of previous frame to influence next frame, also controls steps in the following formula [steps - (strength_schedule * steps)]", + "Sampler schedule": "controls which sampler to use at a specific scheduled frame", + "Contrast schedule": "adjusts the overall contrast per frame [default neutral at 1.0]", + "CFG scale schedule": "how closely the image should conform to the prompt. Lower values produce more creative results. (recommended range 5-15)", + "FOV schedule": "adjusts the scale at which the canvas is moved in 3D by the translation_z value. [maximum range -180 to +180, with 0 being undefined. Values closer to 180 will make the image have less depth, while values closer to 0 will allow more depth]", + "Aspect Ratio schedule": "adjusts the aspect ratio for the depth calculation (normally 1)", + //"near_schedule": "", + //"far_schedule": "", + "Seed schedule": "allows you to specify seeds at a specific schedule, if seed_behavior is set to schedule.", + "Color coherence": "The color coherence will attempt to sample the overall pixel color information, and trend those values analyzed in the first frame to be applied to future frames.", + // "None": "Disable color coherence", + "HSV": "HSV is a good method for balancing presence of vibrant colors, but may produce unrealistic results - (ie.blue apples)", + "LAB": "LAB is a more linear approach to mimic human perception of color space - a good default setting for most users.", + "RGB": "RGB is good for enforcing unbiased amounts of color in each red, green and blue channel - some images may yield colorized artifacts if sampling is too low.", + "Legacy colormatch": "applies the colormatch only before the video noising, resulting in graying the video over time, use it for backwards compatibility", + "Cadence": "A setting of 1 will cause every frame to receive diffusion in the sequence of image outputs. A setting of 2 will only diffuse on every other frame, yet motion will still be in effect. The output of images during the cadence sequence will be automatically blended, additively and saved to the specified drive. 
This may improve the illusion of coherence in some workflows as the content and context of an image will not change or diffuse during frames that were skipped. Higher values of 4-8 cadence will skip over a larger amount of frames and only diffuse the “Nth” frame as set by the diffusion_cadence value. This may produce more continuity in an animation, at the cost of little opportunity to add more diffused content. In extreme examples, motion within a frame will fail to produce diverse prompt context, and the space will be filled with lines or approximations of content - resulting in unexpected animation patterns and artifacts. Video Input & Interpolation modes are not affected by diffusion_cadence.", + "Optical flow cadence": "Optional method for optical flow used to blend frames during cadence in 3D animation mode (if cadence more than 1).", + "Optical flow redo generation": "This option takes twice as long because it generates twice in order to capture the optical flow from the previous image to the first generation, then warps the previous image and redoes the generation. Works in 2D/3D animation modes.", + "Redo": "Diffusion Redo. This option renders N times before the final render. It is suggested to lower your steps if you up your redo. Seed is randomized during redo generations and restored afterwards.", + "Noise type": "Selects the type of noise being added to each frame", + "uniform": "Uniform noise covers the entire frame. It somewhat flattens and sharpens the video over time, but may be good for cartoonish look. This is the old default setting.", + "perlin": "Perlin noise is a more natural looking noise. It is heterogeneous and less sharp than uniform noise, this way it is more likely that new details will appear in a more coherent way. This is the new default setting.", + "Perlin W": "The width of the Perlin sample. Lower values will make larger noise regions. Think of it as inverse brush stroke width. The greater this setting, the smaller details it will affect.", + "Perlin H": "The height of the Perlin sample. Lower values will make larger noise regions. Think of it as inverse brush stroke width. The greater this setting, the smaller details it will affect.", + "Perlin octaves": "The number of Perlin noise octaves, that is the count of P-noise iterations. Higher values will make the noise more soft and smoke-like, whereas lower values will make it look more organic and spotty. It is limited by 8 octaves as the resulting gain will run out of bounds.", + "Perlin persistence": "How much of noise from each octave is added on each iteration. Higher values will make it more straighter and sharper, while lower values will make it rounder and smoother. It is limited by 1.0 as the resulting gain fill the frame completely with noise.", + "Use depth warping": "enables instructions to warp an image dynamically in 3D mode only.", + "MiDaS weight": "sets a midpoint at which a depthmap is to be drawn: range [-1 to +1]", + "Padding mode": "instructs the handling of pixels outside the field of view as they come into the scene.", + //"border": "Border will attempt to use the edges of the canvas as the pixels to be drawn", //duplicate name as another property + "reflection": "reflection will attempt to approximate the image and tile/repeat pixels", + "zeros": "zeros will not add any new pixel information", + "Sampling Mode": "choose from Bicubic, Bilinear or Nearest modes. 
(Recommended: Bicubic)", + "Save depth maps": "will output a greyscale depth map image alongside the output images.", + + // Prompts + "Prompts": "prompts for your animation in a JSON format. Use --neg words to add 'words' as negative prompt", + "Prompts positive": "positive prompt to be appended to *all* prompts", + "Prompts negative": "negative prompt to be appended to *all* prompts. DON'T use --neg here!", + + //Init + "Use init": "Diffuse the first frame based on an image, similar to img2img.", + "Strength": "Controls the strength of the diffusion on the init image. 0 = disabled", + "Strength 0 no init": "Set the strength to 0 automatically when no init image is used", + "Init image": "the path to your init image", + "Use mask": "Use a grayscale image as a mask on your init image. Whiter areas of the mask are areas that change more.", + "Use alpha as mask": "use the alpha channel of the init image as the mask", + "Mask file": "the path to your mask image", + "Invert mask": "Inverts the colors of the mask", + "Mask brightness adjust": "adjust the brightness of the mask. Should be a positive number, with 1.0 meaning no adjustment.", + "Mask contrast adjust": "adjust the brightness of the mask. Should be a positive number, with 1.0 meaning no adjustment.", + "overlay mask": "Overlay the masked image at the end of the generation so it does not get degraded by encoding and decoding", + "Mask overlay blur": "Blur edges of final overlay mask, if used. Minimum = 0 (no blur)", + "Video init path": "the directory \/ URL at which your video file is located for Video Input mode only", + "Extract nth frame": "during the run sequence, only frames specified by this value will be extracted, saved, and diffused upon. A value of 1 indicates that every frame is to be accounted for. Values of 2 will use every other frame for the sequence. Higher values will skip that number of frames respectively.", + "Extract from frame":"start extracting the input video only from this frame number", + "Extract to frame": "stop the extraction of the video at this frame number. -1 for no limits", + "Overwrite extracted frames": "when enabled, will re-extract video frames each run. When using video_input mode, the run will be instructed to write video frames to the drive. If you’ve already populated the frames needed, uncheck this box to skip past redundant extraction, and immediately start the render. If you have not extracted frames, you must run at least once with this box checked to write the necessary frames.", + "Use mask video": "video_input mode only, enables the extraction and use of a separate video file intended for use as a mask. White areas of the extracted video frames will not be affected by diffusion, while black areas will be fully effected. Lighter/darker areas are affected dynamically.", + "Video mask path": "the directory in which your mask video is located.", + "Interpolate key frames": "selects whether to ignore prompt schedule or _x_frames.", + "Interpolate x frames": "the number of frames to transition thru between prompts (when interpolate_key_frames = true, then the numbers in front of the animation prompts will dynamically guide the images based on their value. If set to false, will ignore the prompt numbers and force interpole_x_frames value regardless of prompt number)", + "Resume from timestring": "instructs the run to start from a specified point", + "Resume timestring": "the required timestamp to reference when resuming. 
Currently only available in 2D & 3D mode, the timestamp is saved as the settings .txt file name as well as images produced during your previous run. The format follows: yyyymmddhhmmss - a timestamp of when the run was started to diffuse.", + + //Video Output + "Skip video creation": "when checked, do not output a video", + "Make GIF": "create a gif in addition to .mp4 file. supports up to 30 fps, will self-disable at higher fps values", + "Upscale":"upscale the images of the next run once it's finished + make a video out of them", + "Upscale model":"model of the upscaler to use. 'realesr-animevideov3' is much faster but yields smoother, less detailed results. the other models only do x4", + "Upscale factor":"how many times to upscale, actual options depend on the chosen upscale model", + "FPS": "The frames per second that the video will run at", + "Output format": "select the type of video file to output", + "PIL gif": "create an animated GIF", + "FFMPEG mp4": "create an MP4 video file", + "FFmpeg location": "the path to where ffmpeg is located. Leave at default 'ffmpeg' if ffmpeg is in your PATH!", + "FFmpeg crf": "controls quality where lower is better, less compressed. values: 0 to 51, default 17", + "FFmpeg preset": "controls how good the compression is, and the operation speed. If you're not in a rush keep it at 'veryslow'", + "Add soundtrack": "when this box is checked, and FFMPEG mp4 is selected as the output format, an audio file will be multiplexed with the video.", + "Soundtrack path": "the path\/ URL to an audio file to accompany the video", + "Use manual settings": "when this is unchecked, the video will automatically be created in the same output folder as the images. Check this box to specify different settings for the creation of the video, specified by the following options", + "Render steps": "render each step of diffusion as a separate frame", + "Max video frames": "the maximum number of frames to include in the video, when use_manual_settings is checked", + "Image path": "the location of images to create the video from, when use_manual_settings is checked", + "MP4 path": "the output location of the mp4 file, when use_manual_settings is checked", + "Delete Imgs": "if enabled, raw imgs will be deleted after a successful video/ videos (upsacling, interpolation, gif) creation", + "Engine": "choose the frame interpolation engine and version", + "Interp X":"how many times to interpolate the source video. e.g source video fps of 12 and a value of x2 will yield a 24fps interpolated video", + "Slow-Mo X":"how many times to slow-down the video. *Naturally affects output fps as well", + "Keep Imgs": "delete or keep raw affected (interpolated/ upscaled depending on the UI section) png imgs", + "Interpolate an existing video":"This feature allows you to interpolate any video with a dedicated button. Video could be completly unrelated to deforum", + "In Frame Count": "uploaded video total frame count", + "In FPS":"uploaded video FPS", + "Interpolated Vid FPS":"calculated output-interpolated video FPS", + "In Res":"uploaded video resolution", + "Out Res":"output video resolution", + + // Looper Args + // "use_looper": "", + "Enable guided images mode": "check this box to enable guided images mode", + "Images to use for keyframe guidance": "images you iterate over, you can do local or web paths (no single backslashes!)", + "Image strength schedule": "how much the image should look like the previou one and new image frame init. 
strength schedule might be better if this is higher, around .75 during the keyfames you want to switch on", + "Blend factor max": "blendFactor = blendFactorMax - blendFactorSlope * cos((frame % tweening_frames_schedule) / (tweening_frames_schedule / 2))", + "Blend factor slope": "blendFactor = blendFactorMax - blendFactorSlope * cos((frame % tweening_frames_schedule) / (tweening_frames_schedule / 2))", + "Tweening frames schedule": "number of the frames that we will blend between current imagined image and input frame image", + "Color correction factor": "how close to get to the colors of the input frame image/ the amount each frame during a tweening step to use the new images colors", + // deforum.py / right side of the ui: + "Settings File": "Path to settings file you want to load. Path can be relative to webui folder OR full - absolute", + + // Hybrid Video + "Generate inputframes": "Initiates extraction of video frames from your video_init_path to the inputframes folder. You only need to do this once and then you can change it to False and re-render", + "Hybrid composite": "Engages hybrid compositing of video into animation in various ways with comp alpha as a master mix control.", + "Use init image as video": "Use init image instead of video. Doesn't require generation of inputframes.", + "First Frame as init image": "If True, uses the first frame of the video as the init_image. False can create interesting transition effects into the video, depending on settings.", + "Motion use prev img": "If enabled, changes the behavior or hybrid_motion to captures motion by comparing the current video frame to the previous rendered image, instead of the previous video frame.", + "Hybrid motion": "Analyzes video frames for camera motion and applies movement to render.", + "Flow method": "Selects the type of Optical Flow to use if Optical Flow is selected in Hybrid motion.", + "Comp mask type": "You don't need a mask to composite video. But, Mask types can control the way that video is composited with the previous image each frame.", + "Comp mask equalize": "Equalizes the mask for the composite before or after autocontrast operation (or both)", + "Comp mask auto contrast": "Auto-contrasts the mask for the composite. If enabled, uses the low/high autocontrast cutoff schedules.", + "Comp mask inverse": "Inverts the composite mask.", + "Comp save extra frames": "If this option is selected, many extra frames will be output for the various processes into the hybridframes folder.", + "Comp alpha schedule": "Schedule controls how much the composite video is mixed in, whether set to mask is None or using a mask. This is the master mix.", + "Flow factor schedule": "Affects optical flow hybrid motion. 1 is normal flow. -1 is negative flow. 0.5 is half flow, etc...", + "Comp mask blend alpha schedule": "If using a blend mask, this controls the blend amount of the video and render for the composite mask.", + "Comp mask contrast schedule": "Controls the contrast of the composite mask. 0.5 if half, 1 is normal contrast, 2 is double, etc.", + "Comp mask auto contrast cutoff high schedule": "If using autocontrast option, this is the high cutoff for the operation.", + "Comp mask auto contrast cutoff low schedule": "If using autocontrast option, this is the low cutoff for the operation.", + "Generate human masks": "This will generate masks of all the humans in a video. Created at generation of hybrid video. 
Not yet integrated for auto-masking, but it will create the masks, and you can then use the mask video manually.", +} + +onUiUpdate(function(){ + gradioApp().querySelectorAll('span, button, select, p').forEach(function(span){ + tooltip = deforum_titles[span.textContent]; + + if(!tooltip){ + tooltip = deforum_titles[span.value]; + } + + if(!tooltip){ + for (const c of span.classList) { + if (c in deforum_titles) { + tooltip = deforum_titles[c]; + break; + } + } + } + + if(tooltip){ + span.title = tooltip; + } + }) + + gradioApp().querySelectorAll('select').forEach(function(select){ + if (select.onchange != null) return; + + select.onchange = function(){ + select.title = deforum_titles[select.value] || ""; + } + }) +}) \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/javascript/deforum.js b/extensions-builtin/sd-webui-deforum/javascript/deforum.js new file mode 100644 index 0000000000000000000000000000000000000000..89279d3a548fd382a3329a0a1c3ef13f815e3e02 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/javascript/deforum.js @@ -0,0 +1,33 @@ +/* +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +Contact the authors: https://deforum.github.io/ +*/ + +function submit_deforum(){ + rememberGallerySelection('deforum_gallery') + showSubmitButtons('deforum', false) + + var id = randomId() + requestProgress(id, gradioApp().getElementById('deforum_gallery_container'), gradioApp().getElementById('deforum_gallery'), function(){ + showSubmitButtons('deforum', true) + }) + + var res = create_submit_args(arguments) + + res[0] = id + + return res +} \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/preload.py b/extensions-builtin/sd-webui-deforum/preload.py new file mode 100644 index 0000000000000000000000000000000000000000..fbd2e4e5df1d4ff605dea01458d361c613749e67 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/preload.py @@ -0,0 +1,42 @@ +# 'Deforum' plugin for Automatic1111's Stable Diffusion WebUI. +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
+ +# Contact the authors: https://deforum.github.io/ + +def preload(parser): + parser.add_argument( + "--deforum-api", + action="store_true", + help="Enable the Deforum API", + default=None, + ) + parser.add_argument( + "--deforum-simple-api", + action="store_true", + help="Enable the simplified version of Deforum API", + default=None, + ) + parser.add_argument( + "--deforum-run-now", + type=str, + help="Comma-delimited list of deforum settings files to run immediately on startup", + default=None, + ) + parser.add_argument( + "--deforum-terminate-after-run-now", + action="store_true", + help="Whether to shut down the a1111 process immediately after completing the generations passed in to '--deforum-run-now'.", + default=None, + ) \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/pytest.ini b/extensions-builtin/sd-webui-deforum/pytest.ini new file mode 100644 index 0000000000000000000000000000000000000000..c24fe5bb9e65c74aec64d71269f3617ed1bb776b --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/pytest.ini @@ -0,0 +1,3 @@ +[pytest] +filterwarnings = + ignore::DeprecationWarning diff --git a/extensions-builtin/sd-webui-deforum/requirements-dev.txt b/extensions-builtin/sd-webui-deforum/requirements-dev.txt new file mode 100644 index 0000000000000000000000000000000000000000..37ab3b597af84e5e152f952c064c41ef97f3e296 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/requirements-dev.txt @@ -0,0 +1,6 @@ +coverage +syrupy +pytest +tenacity +pydantic_requests +moviepy \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/requirements.txt b/extensions-builtin/sd-webui-deforum/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..627961884c59563c7945b0d4bc845e2de268bfc6 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/requirements.txt @@ -0,0 +1,8 @@ +numexpr +matplotlib +pandas +av +pims +imageio_ffmpeg +rich +gdown \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/scripts/default_settings.txt b/extensions-builtin/sd-webui-deforum/scripts/default_settings.txt new file mode 100644 index 0000000000000000000000000000000000000000..ddfea212da49fb47f3060c77553c253ce8c4b9e7 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/default_settings.txt @@ -0,0 +1,259 @@ +{ + "W": 512, + "H": 512, + "show_info_on_ui": true, + "tiling": false, + "restore_faces": false, + "seed_resize_from_w": 0, + "seed_resize_from_h": 0, + "seed": -1, + "sampler": "Euler a", + "steps": 25, + "batch_name": "Deforum_20230812221310", + "seed_behavior": "iter", + "seed_iter_N": 1, + "use_init": false, + "strength": 0.8, + "strength_0_no_init": true, + "init_image": null, + "use_mask": false, + "use_alpha_as_mask": false, + "mask_file": "https://deforum.github.io/a1/M1.jpg", + "invert_mask": false, + "mask_contrast_adjust": 1.0, + "mask_brightness_adjust": 1.0, + "overlay_mask": true, + "mask_overlay_blur": 4, + "fill": 0, + "full_res_mask": true, + "full_res_mask_padding": 4, + "reroll_blank_frames": "ignore", + "reroll_patience": 10.0, + "motion_preview_mode": false, + "prompts": { + "0": " tiny cute swamp bunny, highly detailed, intricate, ultra hd, sharp photo, crepuscular rays, in focus, 4k, landscape --neg nsfw, nude", + "30": " anthropomorphic clean cat, surrounded by mandelbulb fractals, epic angle and pose, symmetrical, 3d, depth of field --neg nsfw, nude", + "60": " a beautiful coconut --neg photo, realistic nsfw, nude", + "90": " a beautiful durian, amazing award winning photography 
--neg nsfw, nude" + }, + "positive_prompts": "", + "negative_prompts": "nsfw, nude", + "animation_mode": "3D", + "max_frames": 120, + "border": "replicate", + "angle": "0: (0)", + "zoom": "0: (1.0025+0.002*sin(1.25*3.14*t/30))", + "translation_x": "0: (0)", + "translation_y": "0: (0)", + "translation_z": "0: (1.75)", + "transform_center_x": "0: (0.5)", + "transform_center_y": "0: (0.5)", + "rotation_3d_x": "0: (0)", + "rotation_3d_y": "0: (0)", + "rotation_3d_z": "0: (0)", + "enable_perspective_flip": false, + "perspective_flip_theta": "0: (0)", + "perspective_flip_phi": "0: (0)", + "perspective_flip_gamma": "0: (0)", + "perspective_flip_fv": "0: (53)", + "noise_schedule": "0: (0.065)", + "strength_schedule": "0: (0.65)", + "contrast_schedule": "0: (1.0)", + "cfg_scale_schedule": "0: (7)", + "enable_steps_scheduling": false, + "steps_schedule": "0: (25)", + "fov_schedule": "0: (70)", + "aspect_ratio_schedule": "0: (1)", + "aspect_ratio_use_old_formula": false, + "near_schedule": "0: (200)", + "far_schedule": "0: (10000)", + "seed_schedule": "0:(s), 1:(-1), \"max_f-2\":(-1), \"max_f-1\":(s)", + "pix2pix_img_cfg_scale_schedule": "0:(1.5)", + "enable_subseed_scheduling": false, + "subseed_schedule": "0: (1)", + "subseed_strength_schedule": "0: (0)", + "enable_sampler_scheduling": false, + "sampler_schedule": "0: (\"Euler a\")", + "use_noise_mask": false, + "mask_schedule": "0: (\"{video_mask}\")", + "noise_mask_schedule": "0: (\"{video_mask}\")", + "enable_checkpoint_scheduling": false, + "checkpoint_schedule": "0: (\"model1.ckpt\"), 100: (\"model2.safetensors\")", + "enable_clipskip_scheduling": false, + "clipskip_schedule": "0: (2)", + "enable_noise_multiplier_scheduling": true, + "noise_multiplier_schedule": "0: (1.05)", + "resume_from_timestring": false, + "resume_timestring": "20230129210106", + "enable_ddim_eta_scheduling": false, + "ddim_eta_schedule": "0: (0)", + "enable_ancestral_eta_scheduling": false, + "ancestral_eta_schedule": "0: (1)", + "amount_schedule": "0: (0.1)", + "kernel_schedule": "0: (5)", + "sigma_schedule": "0: (1)", + "threshold_schedule": "0: (0)", + "color_coherence": "LAB", + "color_coherence_image_path": "", + "color_coherence_video_every_N_frames": 1, + "color_force_grayscale": false, + "legacy_colormatch": false, + "diffusion_cadence": 2, + "optical_flow_cadence": "None", + "cadence_flow_factor_schedule": "0: (1)", + "optical_flow_redo_generation": "None", + "redo_flow_factor_schedule": "0: (1)", + "diffusion_redo": "0", + "noise_type": "perlin", + "perlin_octaves": 4, + "perlin_persistence": 0.5, + "use_depth_warping": true, + "depth_algorithm": "Midas-3-Hybrid", + "midas_weight": 0.2, + "padding_mode": "border", + "sampling_mode": "bicubic", + "save_depth_maps": false, + "video_init_path": "https://deforum.github.io/a1/V1.mp4", + "extract_nth_frame": 1, + "extract_from_frame": 0, + "extract_to_frame": -1, + "overwrite_extracted_frames": false, + "use_mask_video": false, + "video_mask_path": "https://deforum.github.io/a1/VM1.mp4", + "hybrid_comp_alpha_schedule": "0:(0.5)", + "hybrid_comp_mask_blend_alpha_schedule": "0:(0.5)", + "hybrid_comp_mask_contrast_schedule": "0:(1)", + "hybrid_comp_mask_auto_contrast_cutoff_high_schedule": "0:(100)", + "hybrid_comp_mask_auto_contrast_cutoff_low_schedule": "0:(0)", + "hybrid_flow_factor_schedule": "0:(1)", + "hybrid_generate_inputframes": false, + "hybrid_generate_human_masks": "None", + "hybrid_use_first_frame_as_init_image": true, + "hybrid_motion": "None", + "hybrid_motion_use_prev_img": false, + 
"hybrid_flow_consistency": false, + "hybrid_consistency_blur": 2, + "hybrid_flow_method": "RAFT", + "hybrid_composite": "None", + "hybrid_use_init_image": false, + "hybrid_comp_mask_type": "None", + "hybrid_comp_mask_inverse": false, + "hybrid_comp_mask_equalize": "None", + "hybrid_comp_mask_auto_contrast": false, + "hybrid_comp_save_extra_frames": false, + "parseq_manifest": "", + "parseq_use_deltas": true, + "use_looper": false, + "init_images": "{\n \"0\": \"https://deforum.github.io/a1/Gi1.png\",\n \"max_f/4-5\": \"https://deforum.github.io/a1/Gi2.png\",\n \"max_f/2-10\": \"https://deforum.github.io/a1/Gi3.png\",\n \"3*max_f/4-15\": \"https://deforum.github.io/a1/Gi4.jpg\",\n \"max_f-20\": \"https://deforum.github.io/a1/Gi1.png\"\n}", + "image_strength_schedule": "0:(0.75)", + "blendFactorMax": "0:(0.35)", + "blendFactorSlope": "0:(0.25)", + "tweening_frames_schedule": "0:(20)", + "color_correction_factor": "0:(0.075)", + "cn_1_overwrite_frames": true, + "cn_1_vid_path": "", + "cn_1_mask_vid_path": "", + "cn_1_enabled": false, + "cn_1_low_vram": false, + "cn_1_pixel_perfect": false, + "cn_1_module": "none", + "cn_1_model": "None", + "cn_1_weight": "0:(1)", + "cn_1_guidance_start": "0:(0.0)", + "cn_1_guidance_end": "0:(1.0)", + "cn_1_processor_res": 64, + "cn_1_threshold_a": 64, + "cn_1_threshold_b": 64, + "cn_1_resize_mode": "Inner Fit (Scale to Fit)", + "cn_1_control_mode": "Balanced", + "cn_1_loopback_mode": false, + "cn_2_overwrite_frames": true, + "cn_2_vid_path": "", + "cn_2_mask_vid_path": "", + "cn_2_enabled": false, + "cn_2_low_vram": false, + "cn_2_pixel_perfect": false, + "cn_2_module": "none", + "cn_2_model": "None", + "cn_2_weight": "0:(1)", + "cn_2_guidance_start": "0:(0.0)", + "cn_2_guidance_end": "0:(1.0)", + "cn_2_processor_res": 64, + "cn_2_threshold_a": 64, + "cn_2_threshold_b": 64, + "cn_2_resize_mode": "Inner Fit (Scale to Fit)", + "cn_2_control_mode": "Balanced", + "cn_2_loopback_mode": false, + "cn_3_overwrite_frames": true, + "cn_3_vid_path": "", + "cn_3_mask_vid_path": "", + "cn_3_enabled": false, + "cn_3_low_vram": false, + "cn_3_pixel_perfect": false, + "cn_3_module": "none", + "cn_3_model": "None", + "cn_3_weight": "0:(1)", + "cn_3_guidance_start": "0:(0.0)", + "cn_3_guidance_end": "0:(1.0)", + "cn_3_processor_res": 64, + "cn_3_threshold_a": 64, + "cn_3_threshold_b": 64, + "cn_3_resize_mode": "Inner Fit (Scale to Fit)", + "cn_3_control_mode": "Balanced", + "cn_3_loopback_mode": false, + "cn_4_overwrite_frames": true, + "cn_4_vid_path": "", + "cn_4_mask_vid_path": "", + "cn_4_enabled": false, + "cn_4_low_vram": false, + "cn_4_pixel_perfect": false, + "cn_4_module": "none", + "cn_4_model": "None", + "cn_4_weight": "0:(1)", + "cn_4_guidance_start": "0:(0.0)", + "cn_4_guidance_end": "0:(1.0)", + "cn_4_processor_res": 64, + "cn_4_threshold_a": 64, + "cn_4_threshold_b": 64, + "cn_4_resize_mode": "Inner Fit (Scale to Fit)", + "cn_4_control_mode": "Balanced", + "cn_4_loopback_mode": false, + "cn_5_overwrite_frames": true, + "cn_5_vid_path": "", + "cn_5_mask_vid_path": "", + "cn_5_enabled": false, + "cn_5_low_vram": false, + "cn_5_pixel_perfect": false, + "cn_5_module": "none", + "cn_5_model": "None", + "cn_5_weight": "0:(1)", + "cn_5_guidance_start": "0:(0.0)", + "cn_5_guidance_end": "0:(1.0)", + "cn_5_processor_res": 64, + "cn_5_threshold_a": 64, + "cn_5_threshold_b": 64, + "cn_5_resize_mode": "Inner Fit (Scale to Fit)", + "cn_5_control_mode": "Balanced", + "cn_5_loopback_mode": false, + "skip_video_creation": false, + "fps": 15, + "make_gif": false, + 
"delete_imgs": false, + "delete_input_frames": false, + "add_soundtrack": "None", + "soundtrack_path": "https://deforum.github.io/a1/A1.mp3", + "r_upscale_video": false, + "r_upscale_factor": "x2", + "r_upscale_model": "realesr-animevideov3", + "r_upscale_keep_imgs": true, + "store_frames_in_ram": false, + "frame_interpolation_engine": "None", + "frame_interpolation_x_amount": 2, + "frame_interpolation_slow_mo_enabled": false, + "frame_interpolation_slow_mo_amount": 2, + "frame_interpolation_keep_imgs": true, + "frame_interpolation_use_upscaled": false, + "sd_model_name": "revAnimated_v122.safetensors", + "sd_model_hash": "3f4fefd9", + "deforum_git_commit_id": "eb16c856" +} \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum.py b/extensions-builtin/sd-webui-deforum/scripts/deforum.py new file mode 100644 index 0000000000000000000000000000000000000000..2895337d11e0e535e19f617c25bd3fcf89e77c85 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum.py @@ -0,0 +1,42 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +# Contact the authors: https://deforum.github.io/ + +import os + +import modules.paths as ph +from modules import script_callbacks +from modules.shared import cmd_opts +from scripts.deforum_extend_paths import deforum_sys_extend + + +def init_deforum(): + # use sys.path.extend to make sure all of our files are available for importation + deforum_sys_extend() + + # create the Models/Deforum folder, where many of the deforum related models/ packages will be downloaded + os.makedirs(ph.models_path + '/Deforum', exist_ok=True) + + # import our on_ui_tabs and on_ui_settings functions from the respected files + from deforum_helpers.ui_right import on_ui_tabs + from deforum_helpers.ui_settings import on_ui_settings + + # trigger webui's extensions mechanism using our imported main functions - + # first to create the actual deforum gui, then to make the deforum tab in webui's settings section + script_callbacks.on_ui_tabs(on_ui_tabs) + script_callbacks.on_ui_settings(on_ui_settings) + +init_deforum() + diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_api.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_api.py new file mode 100644 index 0000000000000000000000000000000000000000..9d492e7cabe871835f1a9e8fced82042ad3f8898 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_api.py @@ -0,0 +1,485 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +# Contact the authors: https://deforum.github.io/ + +import os +import atexit +import json +import random +import tempfile +import traceback +import logging +import threading +from concurrent.futures import ThreadPoolExecutor +from dataclasses import dataclass, replace +from datetime import datetime +from typing import Any, Dict, List +from deforum_api_models import Batch, DeforumJobErrorType, DeforumJobStatusCategory, DeforumJobPhase, DeforumJobStatus +from contextlib import contextmanager +from deforum_extend_paths import deforum_sys_extend + +import gradio as gr +from deforum_helpers.args import (DeforumAnimArgs, DeforumArgs, + DeforumOutputArgs, LoopArgs, ParseqArgs, + RootArgs, get_component_names) +from fastapi import FastAPI, Response, status + +from modules.shared import cmd_opts, opts, state + + +log = logging.getLogger(__name__) +log_level = os.environ.get("DEFORUM_API_LOG_LEVEL") or os.environ.get("SD_WEBUI_LOG_LEVEL") or "INFO" +log.setLevel(log_level) +logging.basicConfig( + format='%(asctime)s %(levelname)s [%(name)s] %(message)s', + datefmt='%Y-%m-%d %H:%M:%S', +) + +def make_ids(job_count: int): + batch_id = f"batch({random.randint(0, 1e9)})" + job_ids = [f"{batch_id}-{i}" for i in range(job_count)] + return [batch_id, job_ids] + + +def get_default_value(name:str): + allArgs = RootArgs() | DeforumAnimArgs() | DeforumArgs() | LoopArgs() | ParseqArgs() | DeforumOutputArgs() + if name in allArgs and isinstance(allArgs[name], dict): + return allArgs[name].get("value", None) + elif name in allArgs: + return allArgs[name] + else: + return None + + +def run_deforum_batch(batch_id: str, job_ids: [str], deforum_settings_files: List[Any], opts_overrides: Dict[str, Any] = None): + log.info(f"Starting batch {batch_id} in thread {threading.get_ident()}.") + try: + with A1111OptionsOverrider(opts_overrides): + # Fill deforum args with default values. + # We are overriding everything with the batch files, but some values are eagerly validated, so must appear valid. + component_names = get_component_names() + prefixed_gradio_args = 2 + expected_arg_count = prefixed_gradio_args + len(component_names) + run_deforum_args = [None] * expected_arg_count + for idx, name in enumerate(component_names): + run_deforum_args[prefixed_gradio_args + idx] = get_default_value(name) + + # For some values, defaults don't pass validation... + run_deforum_args[prefixed_gradio_args + component_names.index('animation_prompts')] = '{"0":"dummy value"}' + run_deforum_args[prefixed_gradio_args + component_names.index('animation_prompts_negative')] = '' + run_deforum_args[prefixed_gradio_args + component_names.index('animation_prompts_positive')] = '' + + # Arg 0 is a UID for the batch + run_deforum_args[0] = batch_id + + # Setup batch override + run_deforum_args[prefixed_gradio_args + component_names.index('override_settings_with_file')] = True + run_deforum_args[prefixed_gradio_args + component_names.index('custom_settings_file')] = deforum_settings_files + + # Cleanup old state from previously cancelled jobs + # WARNING: not thread safe because state is global. If we ever run multiple batches in parallel, this will need to be reworked. 
+ state.skipped = False + state.interrupted = False + + # Invoke deforum with appropriate args + from deforum_helpers.run_deforum import run_deforum + run_deforum(*run_deforum_args) + + except Exception as e: + log.error(f"Batch {batch_id} failed: {e}") + traceback.print_exc() + for job_id in job_ids: + # Mark all jobs in this batch as failed + JobStatusTracker().fail_job(job_id, 'TERMINAL', {e}) + + +# API to allow a batch of jobs to be submitted to the deforum pipeline. +# A batch is settings object OR a list of settings objects. +# A settings object is the JSON structure you can find in your saved settings.txt files. +# +# Request format: +# { +# "deforum_settings": [ +# { ... settings object ... }, +# { ... settings object ... }, +# ] +# } +# OR: +# { +# "deforum_settings": { ... settings object ... } +# } +# +# Each settings object in the request represents a job to run as part of the batch. +# Each submitted batch will be given a batch ID which the user can use to query the status of all jobs in the batch. +# +def deforum_api(_: gr.Blocks, app: FastAPI): + + deforum_sys_extend() + + apiState = ApiState() + + # Submit a new batch + @app.post("/deforum_api/batches") + async def run_batch(batch: Batch, response: Response): + + # Extract the settings files from the request + deforum_settings_data = batch.deforum_settings + if not deforum_settings_data: + response.status_code = status.HTTP_400_BAD_REQUEST + return {"message": "No settings files provided. Please provide an element 'deforum_settings' of type list in the request JSON payload."} + + if not isinstance(deforum_settings_data, list): + # Allow input deforum_settings to be top-level object as well as single object list + deforum_settings_data = [deforum_settings_data] + + deforum_settings_tempfiles = [] + for data in deforum_settings_data: + temp_file = tempfile.NamedTemporaryFile(mode='w+t', delete=False) + json.dump(data, temp_file) + temp_file.close() + deforum_settings_tempfiles.append(temp_file) + + job_count = len(deforum_settings_tempfiles) + [batch_id, job_ids] = make_ids(job_count) + apiState.submit_job(batch_id, job_ids, deforum_settings_tempfiles, batch.options_overrides) + + for idx, job_id in enumerate(job_ids): + JobStatusTracker().accept_job(batch_id=batch_id, job_id=job_id, deforum_settings=deforum_settings_data[idx], options_overrides=batch.options_overrides) + + response.status_code = status.HTTP_202_ACCEPTED + return {"message": "Job(s) accepted", "batch_id": batch_id, "job_ids": job_ids } + + # List all batches and theit job ids + @app.get("/deforum_api/batches") + async def list_batches(id: str): + return JobStatusTracker().batches + + # Show the details of all jobs in a batch + @app.get("/deforum_api/batches/{id}") + async def get_batch(id: str, response: Response): + jobsForBatch = JobStatusTracker().batches[id] + if not jobsForBatch: + response.status_code = status.HTTP_404_NOT_FOUND + return {"id": id, "status": "NOT FOUND"} + return [JobStatusTracker().get(job_id) for job_id in jobsForBatch] + + # Cancel all jobs in a batch + @app.delete("/deforum_api/batches/{id}") + async def cancel_batch(id: str, response: Response): + jobsForBatch = JobStatusTracker().batches[id] + cancelled_jobs = [] + if not jobsForBatch: + response.status_code = status.HTTP_404_NOT_FOUND + return {"id": id, "status": "NOT FOUND"} + for job_id in jobsForBatch: + try: + cancelled = _cancel_job(job_id) + if cancelled: + cancelled_jobs.append(job_id) + except: + log.warning(f"Failed to cancel job {job_id}") + + return {"ids": 
cancelled_jobs, "message:": f"{len(cancelled_jobs)} job(s) cancelled." } + + # Show details of all jobs across al batches + @app.get("/deforum_api/jobs") + async def list_jobs(): + return JobStatusTracker().statuses + + # Show details of a single job + @app.get("/deforum_api/jobs/{id}") + async def get_job(id: str, response: Response): + jobStatus = JobStatusTracker().get(id) + if not jobStatus: + response.status_code = status.HTTP_404_NOT_FOUND + return {"id": id, "status": "NOT FOUND"} + return jobStatus + + # Cancel a single job + @app.delete("/deforum_api/jobs/{id}") + async def cancel_job(id: str, response: Response): + try: + if _cancel_job(id): + return {"id": id, "message": "Job cancelled."} + else: + response.status_code = status.HTTP_400_BAD_REQUEST + return {"id": id, "message": f"Job with ID {id} not in a cancellable state. Has it already finished?"} + except FileNotFoundError as e: + response.status_code = status.HTTP_404_NOT_FOUND + return {"id": id, "message": f"Job with ID {id} not found."} + + # Shared logic for job cancellation + def _cancel_job(job_id:str): + jobStatus = JobStatusTracker().get(job_id) + if not jobStatus: + raise FileNotFoundError(f"Job {job_id} not found.") + + if jobStatus.status != DeforumJobStatusCategory.ACCEPTED: + # Ignore jobs in completed state (error or success) + return False + + if job_id in ApiState().submitted_jobs: + # Remove job from queue + ApiState().submitted_jobs[job_id].cancel() + if jobStatus.phase != DeforumJobPhase.QUEUED and jobStatus.phase != DeforumJobPhase.DONE: + # Job must be actively running - interrupt it. + # WARNING: + # - Possible race condition: if job_id just finished after the check and another started, we'll interrupt the wrong job. + # - Not thread safe because State object is global. Will break with concurrent jobs. + state.interrupt() + JobStatusTracker().cancel_job(job_id, "Cancelled due to user request.") + return True + +class Singleton(type): + _instances = {} + def __call__(cls, *args, **kwargs): + if cls not in cls._instances: + cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs) + return cls._instances[cls] + +# Maintains persistent state required by API, e.g. thread pook, list of submitted jobs. +class ApiState(metaclass=Singleton): + + ## Locking concurrency to 1. Concurrent generation does seem to work, but it's not clear if it's safe. + ## TODO: more experimentation required. 
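    ## With max_workers=1 the executor below runs a single batch at a time: additional
    ## batches submitted through the API wait in the executor's queue, which is why newly
    ## accepted jobs sit in the QUEUED phase until their batch reaches the worker thread.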
+ deforum_api_executor = ThreadPoolExecutor(max_workers=1) + submitted_jobs : Dict[str, Any] = {} + + @staticmethod + def cleanup(): + ApiState().deforum_api_executor.shutdown(wait=False) + + def submit_job(self, batch_id: str, job_ids: [str], deforum_settings: List[Any], opts_overrides: Dict[str, Any]): + log.debug(f"Submitting batch {batch_id} to threadpool.") + future = self.deforum_api_executor.submit(lambda: run_deforum_batch(batch_id, job_ids, deforum_settings, opts_overrides)) + self.submitted_jobs[batch_id] = future + +atexit.register(ApiState.cleanup) + + +class A1111OptionsOverrider(object): + def __init__(self, opts_overrides: Dict[str, Any]): + self.opts_overrides = opts_overrides + + def __enter__(self): + if self.opts_overrides is not None and len(self.opts_overrides)>1: + self.original_opts = {k: opts.data[k] for k in self.opts_overrides.keys() if k in opts.data} + log.debug(f"Captured options to override: {self.original_opts}") + log.info(f"Setting options: {self.opts_overrides}") + for k, v in self.opts_overrides.items(): + setattr(opts, k, v) + else: + self.original_opts = None + return self + + def __exit__(self, exception_type, exception_value, traceback): + if (exception_type is not None): + log.warning(f"Error during batch execution: {exception_type} - {exception_value}") + log.debug(f"{traceback}") + if (self.original_opts is not None): + log.info(f"Restoring options: {self.original_opts}") + for k, v in self.original_opts.items(): + setattr(opts, k, v) + + +# Maintains state that tracks status of submitted jobs, +# so that clients can query job status. +class JobStatusTracker(metaclass=Singleton): + statuses: Dict[str, DeforumJobStatus] = {} + batches: Dict[str, List[str]] = {} + + def accept_job(self, batch_id : str, job_id: str, deforum_settings : List[Dict[str, Any]] , options_overrides : Dict[str, Any]): + if batch_id in self.batches: + self.batches[batch_id].append(job_id) + else: + self.batches[batch_id] = [job_id] + + now = datetime.now().timestamp() + self.statuses[job_id] = DeforumJobStatus( + id=job_id, + status= DeforumJobStatusCategory.ACCEPTED, + phase=DeforumJobPhase.QUEUED, + error_type=DeforumJobErrorType.NONE, + phase_progress=0.0, + started_at=now, + last_updated=now, + execution_time=0, + update_interval_time=0, + updates=0, + message=None, + outdir=None, + timestring=None, + deforum_settings=deforum_settings, + options_overrides=options_overrides, + ) + + def update_phase(self, job_id: str, phase: DeforumJobPhase, progress: float = 0): + if job_id in self.statuses: + current_status = self.statuses[job_id] + now = datetime.now().timestamp() + new_status = replace( + current_status, + phase=phase, + phase_progress=progress, + last_updated=now, + execution_time=now-current_status.started_at, + update_interval_time=now-current_status.last_updated, + updates=current_status.updates+1 + ) + self.statuses[job_id] = new_status + + def update_output_info(self, job_id: str, outdir: str, timestring: str): + if job_id in self.statuses: + current_status = self.statuses[job_id] + now = datetime.now().timestamp() + new_status = replace( + current_status, + outdir=outdir, + timestring=timestring, + last_updated=now, + execution_time=now-current_status.started_at, + update_interval_time=now-current_status.last_updated, + updates=current_status.updates+1 + ) + self.statuses[job_id] = new_status + + def complete_job(self, job_id: str): + if job_id in self.statuses: + current_status = self.statuses[job_id] + now = datetime.now().timestamp() + new_status = replace( + 
current_status, + status=DeforumJobStatusCategory.SUCCEEDED, + phase=DeforumJobPhase.DONE, + phase_progress=1.0, + last_updated=now, + execution_time=now-current_status.started_at, + update_interval_time=now-current_status.last_updated, + updates=current_status.updates+1 + ) + self.statuses[job_id] = new_status + + def fail_job(self, job_id: str, error_type: str, message: str): + if job_id in self.statuses: + current_status = self.statuses[job_id] + now = datetime.now().timestamp() + new_status = replace( + current_status, + status=DeforumJobStatusCategory.FAILED, + error_type=error_type, + message=message, + last_updated=now, + execution_time=now-current_status.started_at, + update_interval_time=now-current_status.last_updated, + updates=current_status.updates+1 + ) + self.statuses[job_id] = new_status + + def cancel_job(self, job_id: str, message: str): + if job_id in self.statuses: + current_status = self.statuses[job_id] + now = datetime.now().timestamp() + new_status = replace( + current_status, + status=DeforumJobStatusCategory.CANCELLED, + message=message, + last_updated=now, + execution_time=now-current_status.started_at, + update_interval_time=now-current_status.last_updated, + updates=current_status.updates+1 + ) + self.statuses[job_id] = new_status + + + def get(self, job_id:str): + return self.statuses[job_id] if job_id in self.statuses else None + +def deforum_init_batch(_: gr.Blocks, app: FastAPI): + deforum_sys_extend() + settings_files = [open(filename, 'r') for filename in cmd_opts.deforum_run_now.split(",")] + [batch_id, job_ids] = make_ids(len(settings_files)) + log.info(f"Starting init batch {batch_id} with job(s) {job_ids}...") + + run_deforum_batch(batch_id, job_ids, settings_files, None) + + if cmd_opts.deforum_terminate_after_run_now: + import os + os._exit(0) + +# A simplified, but safe version of Deforum's API +def deforum_simple_api(_: gr.Blocks, app: FastAPI): + deforum_sys_extend() + from fastapi.exceptions import RequestValidationError + from fastapi.responses import JSONResponse + from fastapi import FastAPI, Query, Request, UploadFile + from fastapi.encoders import jsonable_encoder + from deforum_helpers.general_utils import get_deforum_version + import uuid, pathlib + + @app.exception_handler(RequestValidationError) + async def validation_exception_handler(request: Request, exc: RequestValidationError): + return JSONResponse( + status_code=422, + content=jsonable_encoder({"detail": exc.errors(), "body": exc.body}), + ) + + @app.get("/deforum/api_version") + async def deforum_api_version(): + return JSONResponse(content={"version": '1.0'}) + + @app.get("/deforum/version") + async def deforum_version(): + return JSONResponse(content={"version": get_deforum_version()}) + + @app.post("/deforum/run") + async def deforum_run(settings_json:str, allowed_params:str = ""): + try: + allowed_params = allowed_params.split(';') + deforum_settings = json.loads(settings_json) + with open(os.path.join(pathlib.Path(__file__).parent.absolute(), 'default_settings.txt'), 'r', encoding='utf-8') as f: + default_settings = json.loads(f.read()) + for k, _ in default_settings.items(): + if k in deforum_settings and k in allowed_params: + default_settings[k] = deforum_settings[k] + deforum_settings = default_settings + run_id = uuid.uuid4().hex + deforum_settings['batch_name'] = run_id + deforum_settings = json.dumps(deforum_settings, indent=4, ensure_ascii=False) + settings_file = f"{run_id}.txt" + with open(settings_file, 'w', encoding='utf-8') as f: + f.write(deforum_settings) + 
class SettingsWrapper: + def __init__(self, filename): + self.name = filename + [batch_id, job_ids] = make_ids(1) + outdir = os.path.join(os.getcwd(), opts.outdir_samples or opts.outdir_img2img_samples, str(run_id)) + run_deforum_batch(batch_id, job_ids, [SettingsWrapper(settings_file)], None) + return JSONResponse(content={"outdir": outdir}) + except Exception as e: + print(e) + traceback.print_exc() + return JSONResponse(status_code=500, content={"detail": "An error occurred while processing the video."},) + +# Setup A1111 initialisation hooks +try: + import modules.script_callbacks as script_callbacks + if cmd_opts.deforum_api: + script_callbacks.on_app_started(deforum_api) + if cmd_opts.deforum_simple_api: + script_callbacks.on_app_started(deforum_simple_api) + if cmd_opts.deforum_run_now: + script_callbacks.on_app_started(deforum_init_batch) +except: + pass diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_api_models.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_api_models.py new file mode 100644 index 0000000000000000000000000000000000000000..2d6113b259a54477f694867d861d0f18230116ea --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_api_models.py @@ -0,0 +1,60 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
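The simplified endpoint registered above, POST /deforum/run, takes the settings JSON and a semicolon-delimited allow-list as query parameters; only keys named in allowed_params override the bundled scripts/default_settings.txt, everything else keeps its default. A hedged sketch of a call (host, port and the chosen parameters are illustrative):

import json
import requests

BASE = "http://127.0.0.1:7860"  # assumed A1111 instance launched with --deforum-simple-api

settings = {"W": 512, "H": 512, "max_frames": 60, "prompts": {"0": "a foggy forest"}}
resp = requests.post(
    f"{BASE}/deforum/run",
    params={
        "settings_json": json.dumps(settings),
        # keys not listed here are silently left at their defaults
        "allowed_params": "W;H;max_frames;prompts",
    },
)
print(resp.json())  # {"outdir": "..."} on success, a 500 with a generic detail message on failure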
+ +# Contact the authors: https://deforum.github.io/ + +from pydantic import BaseModel +from typing import Any, Dict, List, Optional, Union +from dataclasses import dataclass +from enum import Enum + +class Batch(BaseModel): + deforum_settings : Optional[Union[Dict[str, Any],List[Dict[str, Any]]]] + options_overrides : Optional[Dict[str, Any]] + +class DeforumJobStatusCategory(str, Enum): + ACCEPTED = "ACCEPTED" + SUCCEEDED = "SUCCEEDED" + FAILED = "FAILED" + CANCELLED = "CANCELLED" + +class DeforumJobPhase(str, Enum): + QUEUED = "QUEUED" + PREPARING = "PREPARING" + GENERATING = "GENERATING" + POST_PROCESSING = "POST_PROCESSING" + DONE = "DONE" + +class DeforumJobErrorType(str, Enum): + NONE = "NONE" + RETRYABLE = "RETRYABLE" + TERMINAL = "TERMINAL" + +@dataclass(frozen=True) +class DeforumJobStatus(BaseModel): + id: str + status : DeforumJobStatusCategory + phase : DeforumJobPhase + error_type : DeforumJobErrorType + phase_progress : float + started_at: float + last_updated: float + execution_time: float # time between job start and the last status update + update_interval_time: float # time between the last two status updates + updates: int # number of status updates so far + message: Optional[str] + outdir: Optional[str] + timestring: Optional[str] + deforum_settings : Optional[List[Dict[str, Any]]] + options_overrides : Optional[Dict[str, Any]] \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_extend_paths.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_extend_paths.py new file mode 100644 index 0000000000000000000000000000000000000000..ef10c8d9e297e68a827e9560170c20256e0e3645 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_extend_paths.py @@ -0,0 +1,33 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
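The Batch model and the job-status dataclass above define the request and response shapes for the batch API implemented in deforum_api.py. A rough client-side sketch tying the two together; the host, port and payload values are illustrative, not part of the extension:

import requests

BASE = "http://127.0.0.1:7860"  # assumed local A1111 instance launched with --deforum-api

# POST one settings object (same shape as a saved settings.txt) per job in the batch;
# options_overrides temporarily overrides A1111 options for the duration of the batch.
resp = requests.post(f"{BASE}/deforum_api/batches", json={
    "deforum_settings": [{"prompts": {"0": "a lighthouse at dusk"}, "max_frames": 30}],
    "options_overrides": {"CLIP_stop_at_last_layers": 2, "eta_noise_seed_delta": 31337},
})
batch = resp.json()  # {"message": "Job(s) accepted", "batch_id": ..., "job_ids": [...]}

# Each job id resolves to a DeforumJobStatus as defined above.
for job_id in batch["job_ids"]:
    status = requests.get(f"{BASE}/deforum_api/jobs/{job_id}").json()
    print(job_id, status["status"], status["phase"], status["outdir"])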
+ +# Contact the authors: https://deforum.github.io/ + +import os +import sys + +def deforum_sys_extend(): + deforum_folder_name = os.path.sep.join(os.path.abspath(__file__).split(os.path.sep)[:-2]) + + basedirs = [os.getcwd()] + if 'google.colab' in sys.modules: + basedirs.append('/content/gdrive/MyDrive/sd/stable-diffusion-webui') # for TheLastBen's colab + for _ in basedirs: + deforum_paths_to_ensure = [ + os.path.join(deforum_folder_name, 'scripts'), + os.path.join(deforum_folder_name, 'scripts', 'deforum_helpers', 'src') + ] + for deforum_scripts_path_fix in deforum_paths_to_ensure: + if deforum_scripts_path_fix not in sys.path: + sys.path.extend([deforum_scripts_path_fix]) diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/114763196.jpg b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/114763196.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2cf485dd046ef89847c06cc3dd812a6fecabe210 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/114763196.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b06270a819babd08b5ec9d06a539781979f31c3ccb47d9e3d56621e804bd670f +size 24692 diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/RAFT.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/RAFT.py new file mode 100644 index 0000000000000000000000000000000000000000..444d273eaa3e749301ce9ffd64bd8b42f3cf88ba --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/RAFT.py @@ -0,0 +1,44 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
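The RAFT wrapper defined just below loads torchvision's pretrained raft_large network and returns a dense optical-flow field shaped (H, W, 2), matching the layout the OpenCV-based flow code elsewhere in the extension expects. A minimal usage sketch; the import path and the frame files are assumptions:

import cv2
from deforum_helpers.RAFT import RAFT  # assumes scripts/ is on sys.path, as deforum_sys_extend() arranges

raft = RAFT()

# two consecutive frames as same-sized RGB arrays
frame1 = cv2.cvtColor(cv2.imread("frame_0001.png"), cv2.COLOR_BGR2RGB)
frame2 = cv2.cvtColor(cv2.imread("frame_0002.png"), cv2.COLOR_BGR2RGB)

flow = raft.predict(frame1, frame2, num_flow_updates=50)
print(flow.shape)  # (H, W, 2): per-pixel (dx, dy) displacement from frame1 to frame2

raft.delete_model()  # drop the model reference once flow is no longer needed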
+ +# Contact the authors: https://deforum.github.io/ + +import torch +import numpy as np +import torchvision.transforms.functional as F +from torchvision.models.optical_flow import Raft_Large_Weights, raft_large + +class RAFT: + def __init__(self): + weights = Raft_Large_Weights.DEFAULT + self.transforms = weights.transforms() + self.device = "cuda" if torch.cuda.is_available() else "cpu" + self.model = raft_large(weights=weights, progress=False).to(self.device).eval() + + def predict(self, image1, image2, num_flow_updates:int = 50): + img1 = F.to_tensor(image1) + img2 = F.to_tensor(image2) + img1_batch, img2_batch = img1.unsqueeze(0), img2.unsqueeze(0) + img1_batch, img2_batch = self.transforms(img1_batch, img2_batch) + + with torch.no_grad(): + flow = self.model(image1=img1_batch.to(self.device), image2=img2_batch.to(self.device), num_flow_updates=num_flow_updates)[-1].cpu().numpy()[0] + + # align the flow array to have the shape (w, h, 2) so it's compatible with the rest of CV2's flow methods + flow = np.transpose(flow, (1, 2, 0)) + + return flow + + def delete_model(self): + del self.model \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/animation.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/animation.py new file mode 100644 index 0000000000000000000000000000000000000000..8123770cfc570ce5de6af3f640012088fab96e7b --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/animation.py @@ -0,0 +1,429 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +# Contact the authors: https://deforum.github.io/ + +import numpy as np +import cv2 +import py3d_tools as p3d # this is actually a file in our /src folder! 
+from functools import reduce +import math +import torch +from einops import rearrange +from modules.shared import state, opts +from .prompt import check_is_number +from .general_utils import debug_print + +def sample_from_cv2(sample: np.ndarray) -> torch.Tensor: + sample = ((sample.astype(float) / 255.0) * 2) - 1 + sample = sample[None].transpose(0, 3, 1, 2).astype(np.float16) + sample = torch.from_numpy(sample) + return sample + +def sample_to_cv2(sample: torch.Tensor, type=np.uint8) -> np.ndarray: + sample_f32 = rearrange(sample.squeeze().cpu().numpy(), "c h w -> h w c").astype(np.float32) + sample_f32 = ((sample_f32 * 0.5) + 0.5).clip(0, 1) + sample_int8 = (sample_f32 * 255) + return sample_int8.astype(type) + +def construct_RotationMatrixHomogenous(rotation_angles): + assert(type(rotation_angles)==list and len(rotation_angles)==3) + RH = np.eye(4,4) + cv2.Rodrigues(np.array(rotation_angles), RH[0:3, 0:3]) + return RH + +# https://en.wikipedia.org/wiki/Rotation_matrix +def getRotationMatrixManual(rotation_angles): + + rotation_angles = [np.deg2rad(x) for x in rotation_angles] + + phi = rotation_angles[0] # around x + gamma = rotation_angles[1] # around y + theta = rotation_angles[2] # around z + + # X rotation + Rphi = np.eye(4,4) + sp = np.sin(phi) + cp = np.cos(phi) + Rphi[1,1] = cp + Rphi[2,2] = Rphi[1,1] + Rphi[1,2] = -sp + Rphi[2,1] = sp + + # Y rotation + Rgamma = np.eye(4,4) + sg = np.sin(gamma) + cg = np.cos(gamma) + Rgamma[0,0] = cg + Rgamma[2,2] = Rgamma[0,0] + Rgamma[0,2] = sg + Rgamma[2,0] = -sg + + # Z rotation (in-image-plane) + Rtheta = np.eye(4,4) + st = np.sin(theta) + ct = np.cos(theta) + Rtheta[0,0] = ct + Rtheta[1,1] = Rtheta[0,0] + Rtheta[0,1] = -st + Rtheta[1,0] = st + + R = reduce(lambda x,y : np.matmul(x,y), [Rphi, Rgamma, Rtheta]) + + return R + +def getPoints_for_PerspectiveTranformEstimation(ptsIn, ptsOut, W, H, sidelength): + + ptsIn2D = ptsIn[0,:] + ptsOut2D = ptsOut[0,:] + ptsOut2Dlist = [] + ptsIn2Dlist = [] + + for i in range(0,4): + ptsOut2Dlist.append([ptsOut2D[i,0], ptsOut2D[i,1]]) + ptsIn2Dlist.append([ptsIn2D[i,0], ptsIn2D[i,1]]) + + pin = np.array(ptsIn2Dlist) + [W/2.,H/2.] + pout = (np.array(ptsOut2Dlist) + [1.,1.]) * (0.5*sidelength) + pin = pin.astype(np.float32) + pout = pout.astype(np.float32) + + return pin, pout + + +def warpMatrix(W, H, theta, phi, gamma, scale, fV): + + # M is to be estimated + M = np.eye(4, 4) + + fVhalf = np.deg2rad(fV/2.) + d = np.sqrt(W*W+H*H) + sideLength = scale*d/np.cos(fVhalf) + h = d/(2.0*np.sin(fVhalf)) + n = h-(d/2.0) + f = h+(d/2.0) + + # Translation along Z-axis by -h + T = np.eye(4,4) + T[2,3] = -h + + # Rotation matrices around x,y,z + R = getRotationMatrixManual([phi, gamma, theta]) + + + # Projection Matrix + P = np.eye(4,4) + P[0,0] = 1.0/np.tan(fVhalf) + P[1,1] = P[0,0] + P[2,2] = -(f+n)/(f-n) + P[2,3] = -(2.0*f*n)/(f-n) + P[3,2] = -1.0 + + # pythonic matrix multiplication + F = reduce(lambda x,y : np.matmul(x,y), [P, T, R]) + + # shape should be 1,4,3 for ptsIn and ptsOut since perspectiveTransform() expects data in this way. + # In C++, this can be achieved by Mat ptsIn(1,4,CV_64FC3); + ptsIn = np.array([[ + [-W/2., H/2., 0.],[ W/2., H/2., 0.],[ W/2.,-H/2., 0.],[-W/2.,-H/2., 0.] 
+ ]]) + ptsOut = np.array(np.zeros((ptsIn.shape), dtype=ptsIn.dtype)) + ptsOut = cv2.perspectiveTransform(ptsIn, F) + + ptsInPt2f, ptsOutPt2f = getPoints_for_PerspectiveTranformEstimation(ptsIn, ptsOut, W, H, sideLength) + + # check float32 otherwise OpenCV throws an error + assert(ptsInPt2f.dtype == np.float32) + assert(ptsOutPt2f.dtype == np.float32) + M33 = cv2.getPerspectiveTransform(ptsInPt2f,ptsOutPt2f) + + return M33, sideLength + +def get_flip_perspective_matrix(W, H, keys, frame_idx): + perspective_flip_theta = keys.perspective_flip_theta_series[frame_idx] + perspective_flip_phi = keys.perspective_flip_phi_series[frame_idx] + perspective_flip_gamma = keys.perspective_flip_gamma_series[frame_idx] + perspective_flip_fv = keys.perspective_flip_fv_series[frame_idx] + M,sl = warpMatrix(W, H, perspective_flip_theta, perspective_flip_phi, perspective_flip_gamma, 1., perspective_flip_fv); + post_trans_mat = np.float32([[1, 0, (W-sl)/2], [0, 1, (H-sl)/2]]) + post_trans_mat = np.vstack([post_trans_mat, [0,0,1]]) + bM = np.matmul(M, post_trans_mat) + return bM + +def flip_3d_perspective(anim_args, prev_img_cv2, keys, frame_idx): + W, H = (prev_img_cv2.shape[1], prev_img_cv2.shape[0]) + return cv2.warpPerspective( + prev_img_cv2, + get_flip_perspective_matrix(W, H, keys, frame_idx), + (W, H), + borderMode=cv2.BORDER_WRAP if anim_args.border == 'wrap' else cv2.BORDER_REPLICATE + ) + +def anim_frame_warp(prev_img_cv2, args, anim_args, keys, frame_idx, depth_model=None, depth=None, device='cuda', half_precision = False): + + if anim_args.use_depth_warping: + if depth is None and depth_model is not None: + depth = depth_model.predict(prev_img_cv2, anim_args.midas_weight, half_precision) + + else: + depth = None + + if anim_args.animation_mode == '2D': + prev_img = anim_frame_warp_2d(prev_img_cv2, args, anim_args, keys, frame_idx) + else: # '3D' + prev_img = anim_frame_warp_3d(device, prev_img_cv2, depth, anim_args, keys, frame_idx) + + return prev_img, depth + +def anim_frame_warp_2d(prev_img_cv2, args, anim_args, keys, frame_idx): + angle = keys.angle_series[frame_idx] + zoom = keys.zoom_series[frame_idx] + translation_x = keys.translation_x_series[frame_idx] + translation_y = keys.translation_y_series[frame_idx] + transform_center_x = keys.transform_center_x_series[frame_idx] + transform_center_y = keys.transform_center_y_series[frame_idx] + center_point = (args.W * transform_center_x, args.H * transform_center_y) + rot_mat = cv2.getRotationMatrix2D(center_point, angle, zoom) + trans_mat = np.float32([[1, 0, translation_x], [0, 1, translation_y]]) + trans_mat = np.vstack([trans_mat, [0,0,1]]) + rot_mat = np.vstack([rot_mat, [0,0,1]]) + if anim_args.enable_perspective_flip: + bM = get_flip_perspective_matrix(args.W, args.H, keys, frame_idx) + rot_mat = np.matmul(bM, rot_mat, trans_mat) + else: + rot_mat = np.matmul(rot_mat, trans_mat) + return cv2.warpPerspective( + prev_img_cv2, + rot_mat, + (prev_img_cv2.shape[1], prev_img_cv2.shape[0]), + borderMode=cv2.BORDER_WRAP if anim_args.border == 'wrap' else cv2.BORDER_REPLICATE + ) + +def anim_frame_warp_3d(device, prev_img_cv2, depth, anim_args, keys, frame_idx): + TRANSLATION_SCALE = 1.0/200.0 # matches Disco + translate_xyz = [ + -keys.translation_x_series[frame_idx] * TRANSLATION_SCALE, + keys.translation_y_series[frame_idx] * TRANSLATION_SCALE, + -keys.translation_z_series[frame_idx] * TRANSLATION_SCALE + ] + rotate_xyz = [ + math.radians(keys.rotation_3d_x_series[frame_idx]), + math.radians(keys.rotation_3d_y_series[frame_idx]), + 
math.radians(keys.rotation_3d_z_series[frame_idx]) + ] + if anim_args.enable_perspective_flip: + prev_img_cv2 = flip_3d_perspective(anim_args, prev_img_cv2, keys, frame_idx) + rot_mat = p3d.euler_angles_to_matrix(torch.tensor(rotate_xyz, device=device), "XYZ").unsqueeze(0) + result = transform_image_3d_switcher(device if not device.type.startswith('mps') else torch.device('cpu'), prev_img_cv2, depth, rot_mat, translate_xyz, anim_args, keys, frame_idx) + torch.cuda.empty_cache() + return result + +def transform_image_3d_switcher(device, prev_img_cv2, depth_tensor, rot_mat, translate, anim_args, keys, frame_idx): + if anim_args.depth_algorithm.lower() in ['midas+adabins (old)', 'zoe+adabins (old)']: + return transform_image_3d_legacy(device, prev_img_cv2, depth_tensor, rot_mat, translate, anim_args, keys, frame_idx) + else: + return transform_image_3d_new(device, prev_img_cv2, depth_tensor, rot_mat, translate, anim_args, keys, frame_idx) + +def transform_image_3d_legacy(device, prev_img_cv2, depth_tensor, rot_mat, translate, anim_args, keys, frame_idx): + # adapted and optimized version of transform_image_3d from Disco Diffusion https://github.com/alembics/disco-diffusion + w, h = prev_img_cv2.shape[1], prev_img_cv2.shape[0] + + if anim_args.aspect_ratio_use_old_formula: + aspect_ratio = float(w)/float(h) + else: + aspect_ratio = keys.aspect_ratio_series[frame_idx] + + near = keys.near_series[frame_idx] + far = keys.far_series[frame_idx] + fov_deg = keys.fov_series[frame_idx] + persp_cam_old = p3d.FoVPerspectiveCameras(near, far, aspect_ratio, fov=fov_deg, degrees=True, device=device) + persp_cam_new = p3d.FoVPerspectiveCameras(near, far, aspect_ratio, fov=fov_deg, degrees=True, R=rot_mat, T=torch.tensor([translate]), device=device) + + # range of [-1,1] is important to torch grid_sample's padding handling + y,x = torch.meshgrid(torch.linspace(-1.,1.,h,dtype=torch.float32,device=device),torch.linspace(-1.,1.,w,dtype=torch.float32,device=device)) + if depth_tensor is None: + z = torch.ones_like(x) + else: + z = torch.as_tensor(depth_tensor, dtype=torch.float32, device=device) + xyz_old_world = torch.stack((x.flatten(), y.flatten(), z.flatten()), dim=1) + + xyz_old_cam_xy = persp_cam_old.get_full_projection_transform().transform_points(xyz_old_world)[:,0:2] + xyz_new_cam_xy = persp_cam_new.get_full_projection_transform().transform_points(xyz_old_world)[:,0:2] + + offset_xy = xyz_new_cam_xy - xyz_old_cam_xy + # affine_grid theta param expects a batch of 2D mats. Each is 2x3 to do rotation+translation. + identity_2d_batch = torch.tensor([[1.,0.,0.],[0.,1.,0.]], device=device).unsqueeze(0) + # coords_2d will have shape (N,H,W,2).. which is also what grid_sample needs. 
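    # The identity affine grid gives every output pixel its own (x, y) sampling coordinate
    # in [-1, 1]; subtracting offset_xy below shifts each coordinate by how far that point's
    # projection moved between the old and new cameras, so grid_sample approximately pulls
    # pixels from where they sat before the camera transform.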
+ coords_2d = torch.nn.functional.affine_grid(identity_2d_batch, [1,1,h,w], align_corners=False) + offset_coords_2d = coords_2d - torch.reshape(offset_xy, (h,w,2)).unsqueeze(0) + + image_tensor = rearrange(torch.from_numpy(prev_img_cv2.astype(np.float32)), 'h w c -> c h w').to(device) + new_image = torch.nn.functional.grid_sample( + image_tensor.add(1/512 - 0.0001).unsqueeze(0), + offset_coords_2d, + mode=anim_args.sampling_mode, + padding_mode=anim_args.padding_mode, + align_corners=False + ) + + # convert back to cv2 style numpy array + result = rearrange( + new_image.squeeze().clamp(0,255), + 'c h w -> h w c' + ).cpu().numpy().astype(prev_img_cv2.dtype) + return result + +def transform_image_3d_new(device, prev_img_cv2, depth_tensor, rot_mat, translate, anim_args, keys, frame_idx): + ''' + originally an adapted and optimized version of transform_image_3d from Disco Diffusion https://github.com/alembics/disco-diffusion + modified by reallybigname to control various incoming tensors + ''' + if anim_args.depth_algorithm.lower().startswith('midas'): # 'Midas-3-Hybrid' or 'Midas-3.1-BeitLarge' + depth = 1 + depth_factor = -1 + depth_offset = -2 + elif anim_args.depth_algorithm.lower() == "adabins": + depth = 1 + depth_factor = 1 + depth_offset = 1 + elif anim_args.depth_algorithm.lower() == "leres": + depth = 1 + depth_factor = 1 + depth_offset = 1 + elif anim_args.depth_algorithm.lower() == "zoe": + depth = 1 + depth_factor = 1 + depth_offset = 1 + else: + raise Exception(f"Unknown depth_algorithm passed to transform_image_3d function: {anim_args.depth_algorithm}") + + w, h = prev_img_cv2.shape[1], prev_img_cv2.shape[0] + + # depth stretching aspect ratio (has nothing to do with image dimensions - which is why the old formula was flawed) + aspect_ratio = float(w)/float(h) if anim_args.aspect_ratio_use_old_formula else keys.aspect_ratio_series[frame_idx] + + # get projection keys + near = keys.near_series[frame_idx] + far = keys.far_series[frame_idx] + fov_deg = keys.fov_series[frame_idx] + + # get perspective cams old (still) and new (transformed) + persp_cam_old = p3d.FoVPerspectiveCameras(near, far, aspect_ratio, fov=fov_deg, degrees=True, device=device) + persp_cam_new = p3d.FoVPerspectiveCameras(near, far, aspect_ratio, fov=fov_deg, degrees=True, R=rot_mat, T=torch.tensor([translate]), device=device) + + # make xy meshgrid - range of [-1,1] is important to torch grid_sample's padding handling + y,x = torch.meshgrid(torch.linspace(-1.,1.,h,dtype=torch.float32,device=device),torch.linspace(-1.,1.,w,dtype=torch.float32,device=device)) + + # test tensor for validity (some are corrupted for some reason) + depth_tensor_invalid = depth_tensor is None or torch.isnan(depth_tensor).any() or torch.isinf(depth_tensor).any() or depth_tensor.min() == depth_tensor.max() + + if depth_tensor is not None: + debug_print(f"Depth_T.min: {depth_tensor.min()}, Depth_T.max: {depth_tensor.max()}") + # if invalid, create flat z for this frame + if depth_tensor_invalid: + # if none, then 3D depth is turned off, so no warning is needed. + if depth_tensor is not None: + print("Depth tensor invalid. 
Generating a Flat depth for this frame.") + # create flat depth + z = torch.ones_like(x) + # create z from depth tensor + else: + # prepare tensor between 0 and 1 with optional equalization and autocontrast + depth_normalized = prepare_depth_tensor(depth_tensor) + + # Rescale the depth values to depth with offset (depth 2 and offset -1 would be -1 to +11) + depth_final = depth_normalized * depth + depth_offset + + # depth factor (1 is normal. -1 is inverted) + if depth_factor != 1: + depth_final *= depth_factor + + # console reporting of depth normalization, min, max, diff + # will *only* print to console if Dev mode is enabled in general settings of Deforum + txt_depth_min, txt_depth_max = '{:.2f}'.format(float(depth_tensor.min())), '{:.2f}'.format(float(depth_tensor.max())) + diff = '{:.2f}'.format(float(depth_tensor.max()) - float(depth_tensor.min())) + console_txt = f"\033[36mDepth normalized to {depth_final.min()}/{depth_final.max()} from" + debug_print(f"{console_txt} {txt_depth_min}/{txt_depth_max} diff {diff}\033[0m") + + # add z from depth + z = torch.as_tensor(depth_final, dtype=torch.float32, device=device) + + # calculate offset_xy + xyz_old_world = torch.stack((x.flatten(), y.flatten(), z.flatten()), dim=1) + xyz_old_cam_xy = persp_cam_old.get_full_projection_transform().transform_points(xyz_old_world)[:,0:2] + xyz_new_cam_xy = persp_cam_new.get_full_projection_transform().transform_points(xyz_old_world)[:,0:2] + offset_xy = xyz_new_cam_xy - xyz_old_cam_xy + + # affine_grid theta param expects a batch of 2D mats. Each is 2x3 to do rotation+translation. + identity_2d_batch = torch.tensor([[1.,0.,0.],[0.,1.,0.]], device=device).unsqueeze(0) + + # coords_2d will have shape (N,H,W,2).. which is also what grid_sample needs. + coords_2d = torch.nn.functional.affine_grid(identity_2d_batch, [1,1,h,w], align_corners=False) + offset_coords_2d = coords_2d - torch.reshape(offset_xy, (h,w,2)).unsqueeze(0) + + # do the hyperdimensional remap + image_tensor = rearrange(torch.from_numpy(prev_img_cv2.astype(np.float32)), 'h w c -> c h w').to(device) + new_image = torch.nn.functional.grid_sample( + image_tensor.unsqueeze(0), # image_tensor.add(1/512 - 0.0001).unsqueeze(0), + offset_coords_2d, + mode=anim_args.sampling_mode, + padding_mode=anim_args.padding_mode, + align_corners=False + ) + + # convert back to cv2 style numpy array + result = rearrange( + new_image.squeeze().clamp(0,255), + 'c h w -> h w c' + ).cpu().numpy().astype(prev_img_cv2.dtype) + return result + +def prepare_depth_tensor(depth_tensor=None): + # Prepares a depth tensor with normalization & equalization between 0 and 1 + depth_range = depth_tensor.max() - depth_tensor.min() + depth_tensor = (depth_tensor - depth_tensor.min()) / depth_range + depth_tensor = depth_equalization(depth_tensor=depth_tensor) + return depth_tensor + +def depth_equalization(depth_tensor): + """ + Perform histogram equalization on a single-channel depth tensor. + + Args: + depth_tensor (torch.Tensor): A 2D depth tensor (H, W). + + Returns: + torch.Tensor: Equalized depth tensor (2D). 
+ """ + + # Convert the depth tensor to a NumPy array for processing + depth_array = depth_tensor.cpu().numpy() + + # Calculate the histogram of the depth values using a specified number of bins + # Increase the number of bins for higher precision depth tensors + hist, bin_edges = np.histogram(depth_array, bins=1024, range=(0, 1)) + + # Calculate the cumulative distribution function (CDF) of the histogram + cdf = hist.cumsum() + + # Normalize the CDF so that the maximum value is 1 + cdf = cdf / float(cdf[-1]) + + # Perform histogram equalization by mapping the original depth values to the CDF values + equalized_depth_array = np.interp(depth_array, bin_edges[:-1], cdf) + + # Convert the equalized depth array back to a PyTorch tensor and return it + equalized_depth_tensor = torch.from_numpy(equalized_depth_array).to(depth_tensor.device) + + return equalized_depth_tensor diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/animation_key_frames.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/animation_key_frames.py new file mode 100644 index 0000000000000000000000000000000000000000..c742fcb49a16cb024ae8a42289b92c6ea4f56ed2 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/animation_key_frames.py @@ -0,0 +1,166 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
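The file below, animation_key_frames.py, turns Deforum's keyframe strings into per-frame pandas Series. A schedule is a comma-separated list of frame: (value) pairs; plain numbers are interpolated between keyframes, while numexpr expressions may reference t (the frame index), max_f (the last frame) and s (the seed). A hedged sketch of the intended behaviour, runnable only inside a webui process where deforum_helpers is importable:

from deforum_helpers.animation_key_frames import FrameInterpolater  # assumed import path

fi = FrameInterpolater(max_frames=120, seed=42)

# Numeric keyframes: 0 at frame 0, 180 at frame 60, linear in between,
# then held at the last keyframed value through frame 119.
angle = fi.parse_inbetweens("0: (0), 60: (180)", 'angle')
print(angle[30])  # 90.0

# The stock zoom schedule from default_settings.txt: the sin(t) term is
# re-evaluated as the frame index advances, giving a gentle oscillation.
zoom = fi.parse_inbetweens("0: (1.0025+0.002*sin(1.25*3.14*t/30))", 'zoom')
print(zoom[0], zoom[15])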
+ +# Contact the authors: https://deforum.github.io/ + +import re +import numpy as np +import numexpr +import pandas as pd +from .prompt import check_is_number +from modules import scripts, shared + +class DeformAnimKeys(): + def __init__(self, anim_args, seed=-1): + self.fi = FrameInterpolater(anim_args.max_frames, seed) + self.angle_series = self.fi.parse_inbetweens(anim_args.angle, 'angle') + self.transform_center_x_series = self.fi.parse_inbetweens(anim_args.transform_center_x, 'transform_center_x') + self.transform_center_y_series = self.fi.parse_inbetweens(anim_args.transform_center_y, 'transform_center_y') + self.zoom_series = self.fi.parse_inbetweens(anim_args.zoom, 'zoom') + self.translation_x_series = self.fi.parse_inbetweens(anim_args.translation_x, 'translation_x') + self.translation_y_series = self.fi.parse_inbetweens(anim_args.translation_y, 'translation_y') + self.translation_z_series = self.fi.parse_inbetweens(anim_args.translation_z, 'translation_z') + self.rotation_3d_x_series = self.fi.parse_inbetweens(anim_args.rotation_3d_x, 'rotation_3d_x') + self.rotation_3d_y_series = self.fi.parse_inbetweens(anim_args.rotation_3d_y, 'rotation_3d_y') + self.rotation_3d_z_series = self.fi.parse_inbetweens(anim_args.rotation_3d_z, 'rotation_3d_z') + self.perspective_flip_theta_series = self.fi.parse_inbetweens(anim_args.perspective_flip_theta, 'perspective_flip_theta') + self.perspective_flip_phi_series = self.fi.parse_inbetweens(anim_args.perspective_flip_phi, 'perspective_flip_phi') + self.perspective_flip_gamma_series = self.fi.parse_inbetweens(anim_args.perspective_flip_gamma, 'perspective_flip_gamma') + self.perspective_flip_fv_series = self.fi.parse_inbetweens(anim_args.perspective_flip_fv, 'perspective_flip_fv') + self.noise_schedule_series = self.fi.parse_inbetweens(anim_args.noise_schedule, 'noise_schedule') + self.strength_schedule_series = self.fi.parse_inbetweens(anim_args.strength_schedule, 'strength_schedule') + self.contrast_schedule_series = self.fi.parse_inbetweens(anim_args.contrast_schedule, 'contrast_schedule') + self.cfg_scale_schedule_series = self.fi.parse_inbetweens(anim_args.cfg_scale_schedule, 'cfg_scale_schedule') + self.ddim_eta_schedule_series = self.fi.parse_inbetweens(anim_args.ddim_eta_schedule, 'ddim_eta_schedule') + self.ancestral_eta_schedule_series = self.fi.parse_inbetweens(anim_args.ancestral_eta_schedule, 'ancestral_eta_schedule') + self.pix2pix_img_cfg_scale_series = self.fi.parse_inbetweens(anim_args.pix2pix_img_cfg_scale_schedule, 'pix2pix_img_cfg_scale_schedule') + self.subseed_schedule_series = self.fi.parse_inbetweens(anim_args.subseed_schedule, 'subseed_schedule') + self.subseed_strength_schedule_series = self.fi.parse_inbetweens(anim_args.subseed_strength_schedule, 'subseed_strength_schedule') + self.checkpoint_schedule_series = self.fi.parse_inbetweens(anim_args.checkpoint_schedule, 'checkpoint_schedule', is_single_string = True) + self.steps_schedule_series = self.fi.parse_inbetweens(anim_args.steps_schedule, 'steps_schedule') + self.seed_schedule_series = self.fi.parse_inbetweens(anim_args.seed_schedule, 'seed_schedule') + self.sampler_schedule_series = self.fi.parse_inbetweens(anim_args.sampler_schedule, 'sampler_schedule', is_single_string = True) + self.clipskip_schedule_series = self.fi.parse_inbetweens(anim_args.clipskip_schedule, 'clipskip_schedule') + self.noise_multiplier_schedule_series = self.fi.parse_inbetweens(anim_args.noise_multiplier_schedule, 'noise_multiplier_schedule') + self.mask_schedule_series = 
self.fi.parse_inbetweens(anim_args.mask_schedule, 'mask_schedule', is_single_string = True) + self.noise_mask_schedule_series = self.fi.parse_inbetweens(anim_args.noise_mask_schedule, 'noise_mask_schedule', is_single_string = True) + self.kernel_schedule_series = self.fi.parse_inbetweens(anim_args.kernel_schedule, 'kernel_schedule') + self.sigma_schedule_series = self.fi.parse_inbetweens(anim_args.sigma_schedule, 'sigma_schedule') + self.amount_schedule_series = self.fi.parse_inbetweens(anim_args.amount_schedule, 'amount_schedule') + self.threshold_schedule_series = self.fi.parse_inbetweens(anim_args.threshold_schedule, 'threshold_schedule') + self.aspect_ratio_series = self.fi.parse_inbetweens(anim_args.aspect_ratio_schedule, 'aspect_ratio_schedule') + self.fov_series = self.fi.parse_inbetweens(anim_args.fov_schedule, 'fov_schedule') + self.near_series = self.fi.parse_inbetweens(anim_args.near_schedule, 'near_schedule') + self.cadence_flow_factor_schedule_series = self.fi.parse_inbetweens(anim_args.cadence_flow_factor_schedule, 'cadence_flow_factor_schedule') + self.redo_flow_factor_schedule_series = self.fi.parse_inbetweens(anim_args.redo_flow_factor_schedule, 'redo_flow_factor_schedule') + self.far_series = self.fi.parse_inbetweens(anim_args.far_schedule, 'far_schedule') + self.hybrid_comp_alpha_schedule_series = self.fi.parse_inbetweens(anim_args.hybrid_comp_alpha_schedule, 'hybrid_comp_alpha_schedule') + self.hybrid_comp_mask_blend_alpha_schedule_series = self.fi.parse_inbetweens(anim_args.hybrid_comp_mask_blend_alpha_schedule, 'hybrid_comp_mask_blend_alpha_schedule') + self.hybrid_comp_mask_contrast_schedule_series = self.fi.parse_inbetweens(anim_args.hybrid_comp_mask_contrast_schedule, 'hybrid_comp_mask_contrast_schedule') + self.hybrid_comp_mask_auto_contrast_cutoff_high_schedule_series = self.fi.parse_inbetweens(anim_args.hybrid_comp_mask_auto_contrast_cutoff_high_schedule, 'hybrid_comp_mask_auto_contrast_cutoff_high_schedule') + self.hybrid_comp_mask_auto_contrast_cutoff_low_schedule_series = self.fi.parse_inbetweens(anim_args.hybrid_comp_mask_auto_contrast_cutoff_low_schedule, 'hybrid_comp_mask_auto_contrast_cutoff_low_schedule') + self.hybrid_flow_factor_schedule_series = self.fi.parse_inbetweens(anim_args.hybrid_flow_factor_schedule, 'hybrid_flow_factor_schedule') + +class ControlNetKeys(): + def __init__(self, anim_args, controlnet_args): + self.fi = FrameInterpolater(max_frames=anim_args.max_frames) + self.schedules = {} + max_models = shared.opts.data.get("control_net_unit_count", shared.opts.data.get("control_net_max_models_num", 5)) + num_of_models = 5 + num_of_models = num_of_models if max_models <= 5 else max_models + for i in range(1, num_of_models + 1): + for suffix in ['weight', 'guidance_start', 'guidance_end']: + prefix = f"cn_{i}" + input_key = f"{prefix}_{suffix}" + output_key = f"{input_key}_schedule_series" + self.schedules[output_key] = self.fi.parse_inbetweens(getattr(controlnet_args, input_key), input_key) + setattr(self, output_key, self.schedules[output_key]) + +class LooperAnimKeys(): + def __init__(self, loop_args, anim_args, seed): + self.fi = FrameInterpolater(anim_args.max_frames, seed) + self.use_looper = loop_args.use_looper + self.imagesToKeyframe = loop_args.init_images + self.image_strength_schedule_series = self.fi.parse_inbetweens(loop_args.image_strength_schedule, 'image_strength_schedule') + self.blendFactorMax_series = self.fi.parse_inbetweens(loop_args.blendFactorMax, 'blendFactorMax') + self.blendFactorSlope_series = 
self.fi.parse_inbetweens(loop_args.blendFactorSlope, 'blendFactorSlope') + self.tweening_frames_schedule_series = self.fi.parse_inbetweens(loop_args.tweening_frames_schedule, 'tweening_frames_schedule') + self.color_correction_factor_series = self.fi.parse_inbetweens(loop_args.color_correction_factor, 'color_correction_factor') + +class FrameInterpolater(): + def __init__(self, max_frames=0, seed=-1) -> None: + self.max_frames = max_frames + self.seed = seed + + def parse_inbetweens(self, value, filename = 'unknown', is_single_string = False): + return self.get_inbetweens(self.parse_key_frames(value, filename = filename), filename = filename, is_single_string = is_single_string) + + def sanitize_value(self, value): + return value.replace("'","").replace('"',"").replace('(',"").replace(')',"") + + def get_inbetweens(self, key_frames, integer=False, interp_method='Linear', is_single_string = False, filename = 'unknown'): + key_frame_series = pd.Series([np.nan for a in range(self.max_frames)]) + # get our ui variables set for numexpr.evaluate + max_f = self.max_frames -1 + s = self.seed + for i in range(0, self.max_frames): + if i in key_frames: + value = key_frames[i] + sanitized_value = self.sanitize_value(value) + value_is_number = check_is_number(sanitized_value) + if value_is_number: # if it's only a number, leave the rest for the default interpolation + key_frame_series[i] = sanitized_value + if not value_is_number: + t = i + # workaround for values formatted like 0:("I am test") //used for sampler schedules + try: + key_frame_series[i] = numexpr.evaluate(value) if not is_single_string else sanitized_value + except SyntaxError as e: + e.filename = f"{filename}@frame#{i}" + raise e + elif is_single_string:# take previous string value and replicate it + key_frame_series[i] = key_frame_series[i-1] + key_frame_series = key_frame_series.astype(float) if not is_single_string else key_frame_series # as string + + if interp_method == 'Cubic' and len(key_frames.items()) <= 3: + interp_method = 'Quadratic' + if interp_method == 'Quadratic' and len(key_frames.items()) <= 2: + interp_method = 'Linear' + + key_frame_series[0] = key_frame_series[key_frame_series.first_valid_index()] + key_frame_series[self.max_frames-1] = key_frame_series[key_frame_series.last_valid_index()] + key_frame_series = key_frame_series.interpolate(method=interp_method.lower(), limit_direction='both') + if integer: + return key_frame_series.astype(int) + return key_frame_series + + def parse_key_frames(self, string, filename='unknown'): + # because math functions (i.e. 
sin(t)) can utilize brackets + # it extracts the value in form of some stuff + # which has previously been enclosed with brackets and + # with a comma or end of line existing after the closing one + frames = dict() + for match_object in string.split(","): + frameParam = match_object.split(":") + max_f = self.max_frames -1 + s = self.seed + try: + frame = int(self.sanitize_value(frameParam[0])) if check_is_number(self.sanitize_value(frameParam[0].strip())) else int(numexpr.evaluate(frameParam[0].strip().replace("'","",1).replace('"',"",1)[::-1].replace("'","",1).replace('"',"",1)[::-1])) + frames[frame] = frameParam[1].strip() + except SyntaxError as e: + e.filename = filename + raise e + if frames == {} and len(string) != 0: + raise RuntimeError('Key Frame string not correctly formatted') + return frames \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/args.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/args.py new file mode 100644 index 0000000000000000000000000000000000000000..3561640fd1c32b7fc3e5fd62b7c50a6d35ae7f1a --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/args.py @@ -0,0 +1,1179 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +# Contact the authors: https://deforum.github.io/ + +import json +import os +import tempfile +import time +from types import SimpleNamespace +import modules.paths as ph +import modules.shared as sh +from modules.processing import get_fixed_seed +from .defaults import get_guided_imgs_default_json, mask_fill_choices, get_samplers_list +from .deforum_controlnet import controlnet_component_names +from .general_utils import get_os, substitute_placeholders + +from PIL import Image +import pathlib + +def RootArgs(): + return { + "device": sh.device, + "models_path": ph.models_path + '/Deforum', + "half_precision": not sh.cmd_opts.no_half, + "clipseg_model": None, + "mask_preset_names": ['everywhere', 'video_mask'], + "frames_cache": [], + "raw_batch_name": None, + "raw_seed": None, + "timestring": "", + "subseed": -1, + "subseed_strength": 0, + "seed_internal": 0, + "init_sample": None, + "noise_mask": None, + "initial_info": None, + "first_frame": None, + "animation_prompts": None, + "current_user_os": get_os(), + "tmp_deforum_run_duplicated_folder": os.path.join(tempfile.gettempdir(), 'tmp_run_deforum') + } + +# 'Midas-3.1-BeitLarge' is temporarily removed until fixed. 
Can add it back anytime as it's supported in the back-end depth code +def DeforumAnimArgs(): + return { + "animation_mode": { + "label": "Animation mode", + "type": "radio", + "choices": ['2D', '3D', 'Video Input', 'Interpolation'], + "value": "2D", + "info": "control animation mode, will hide non relevant params upon change" + }, + "max_frames": { + "label": "Max frames", + "type": "number", + "precision": 0, + "value": 120, + "info": "end the animation at this frame number", + }, + "border": { + "label": "Border mode", + "type": "radio", + "choices": ['replicate', 'wrap'], + "value": "replicate", + "info": "controls pixel generation method for images smaller than the frame. hover on the options to see more info" + }, + "angle": { + "label": "Angle", + "type": "textbox", + "value": "0: (0)", + "info": "rotate canvas clockwise/anticlockwise in degrees per frame" + }, + + "zoom": { + "label": "Zoom", + "type": "textbox", + "value": "0: (1.0025+0.002*sin(1.25*3.14*t/30))", + "info": "scale the canvas size, multiplicatively. [static = 1.0]" + }, + + "translation_x": { + "label": "Translation X", + "type": "textbox", + "value": "0: (0)", + "info": "move canvas left/right in pixels per frame" + }, + + "translation_y": { + "label": "Translation Y", + "type": "textbox", + "value": "0: (0)", + "info": "move canvas up/down in pixels per frame" + }, + "translation_z": { + "label": "Translation Z", + "type": "textbox", + "value": "0: (1.75)", + "info": "move canvas towards/away from view [speed set by FOV]" + }, + "transform_center_x": { + "label": "Transform Center X", + "type": "textbox", + "value": "0: (0.5)", + "info": "X center axis for 2D angle/zoom" + }, + + "transform_center_y": { + "label": "Transform Center Y", + "type": "textbox", + "value": "0: (0.5)", + "info": "Y center axis for 2D angle/zoom" + }, + "rotation_3d_x": { + "label": "Rotation 3D X", + "type": "textbox", + "value": "0: (0)", + "info": "tilt canvas up/down in degrees per frame" + }, + "rotation_3d_y": { + "label": "Rotation 3D Y", + "type": "textbox", + "value": "0: (0)", + "info": "pan canvas left/right in degrees per frame" + }, + "rotation_3d_z": { + "label": "Rotation 3D Z", + "type": "textbox", + "value": "0: (0)", + "info": "roll canvas clockwise/anticlockwise" + }, + "enable_perspective_flip": { + "label": "Enable perspective flip", + "type": "checkbox", + "value": False, + "info": "" + }, + "perspective_flip_theta": { + "label": "Perspective flip theta", + "type": "textbox", + "value": "0: (0)", + "info": "" + }, + "perspective_flip_phi": { + "label": "Perspective flip phi", + "type": "textbox", + "value": "0: (0)", + "info": "" + }, + "perspective_flip_gamma": { + "label": "Perspective flip gamma", + "type": "textbox", + "value": "0: (0)", + "info": "" + }, + "perspective_flip_fv": { + "label": "Perspective flip tv", + "type": "textbox", + "value": "0: (53)", + "info": "the 2D vanishing point of perspective (rec. range 30-160)" + }, + "noise_schedule": { + "label": "Noise schedule", + "type": "textbox", + "value": "0: (0.065)", + "info": "" + }, + "strength_schedule": { + "label": "Strength schedule", + "type": "textbox", + "value": "0: (0.65)", + "info": "amount of presence of previous frame to influence next frame, also controls steps in the following formula [steps - (strength_schedule * steps)]" + }, + "contrast_schedule": "0: (1.0)", + "cfg_scale_schedule": { + "label": "CFG scale schedule", + "type": "textbox", + "value": "0: (7)", + "info": "how closely the image should conform to the prompt. 
Lower values produce more creative results. (recommended range 5-15)`" + }, + "enable_steps_scheduling": { + "label": "Enable steps scheduling", + "type": "checkbox", + "value": False, + "info": "" + }, + "steps_schedule": { + "label": "Steps schedule", + "type": "textbox", + "value": "0: (25)", + "info": "mainly allows using more than 200 steps. Otherwise, it's a mirror-like param of 'strength schedule'" + }, + "fov_schedule": { + "label": "FOV schedule", + "type": "textbox", + "value": "0: (70)", + "info": "adjusts the scale at which the canvas is moved in 3D by the translation_z value. [Range -180 to +180, with 0 being undefined. Values closer to 180 will make the image have less depth, while values closer to 0 will allow more depth]" + }, + "aspect_ratio_schedule": { + "label": "Aspect Ratio schedule", + "type": "textbox", + "value": "0: (1)", + "info": "adjusts the aspect ratio for the depth calculations" + }, + "aspect_ratio_use_old_formula": { + "label": "Use old aspect ratio formula", + "type": "checkbox", + "value": False, + "info": "for backward compatibility. Uses the formula: `width/height`" + }, + "near_schedule": { + "label": "Near schedule", + "type": "textbox", + "value": "0: (200)", + "info": "" + }, + "far_schedule": { + "label": "Far schedule", + "type": "textbox", + "value": "0: (10000)", + "info": "" + }, + "seed_schedule": { + "label": "Seed schedule", + "type": "textbox", + "value": '0:(s), 1:(-1), "max_f-2":(-1), "max_f-1":(s)', + "info": "" + }, + "pix2pix_img_cfg_scale_schedule": { + "label": "Pix2Pix img CFG schedule", + "type": "textbox", + "value": "0:(1.5)", + "info": "ONLY in use when working with a P2P ckpt!" + }, + "enable_subseed_scheduling": { + "label": "Enable Subseed scheduling", + "type": "checkbox", + "value": False, + "info": "" + }, + "subseed_schedule": { + "label": "Subseed schedule", + "type": "textbox", + "value": "0: (1)", + "info": "" + }, + "subseed_strength_schedule": { + "label": "Subseed strength schedule", + "type": "textbox", + "value": "0: (0)", + "info": "" + }, + "enable_sampler_scheduling": { + "label": "Enable sampler scheduling", + "type": "checkbox", + "value": False, + "info": "" + }, + "sampler_schedule": { + "label": "Sampler schedule", + "type": "textbox", + "value": '0: ("Euler a")', + "info": "allows keyframing of samplers. Use names as they appear in ui dropdown in 'run' tab" + }, + "use_noise_mask": { + "label": "Use noise mask", + "type": "checkbox", + "value": False, + "info": "" + }, + "mask_schedule": { + "label": "Mask schedule", + "type": "textbox", + "value": '0: ("{video_mask}")', + "info": "" + }, + "noise_mask_schedule": { + "label": "Noise mask schedule", + "type": "textbox", + "value": '0: ("{video_mask}")', + "info": "" + }, + "enable_checkpoint_scheduling": { + "label": "Enable checkpoint scheduling", + "type": "checkbox", + "value": False, + "info": "" + }, + "checkpoint_schedule": { + "label": "allows keyframing different sd models. Use *full* name as appears in ui dropdown", + "type": "textbox", + "value": '0: ("model1.ckpt"), 100: ("model2.safetensors")', + "info": "allows keyframing different sd models. 
Use *full* name as appears in ui dropdown" + }, + "enable_clipskip_scheduling": { + "label": "Enable CLIP skip scheduling", + "type": "checkbox", + "value": False, + "info": "" + }, + "clipskip_schedule": { + "label": "CLIP skip schedule", + "type": "textbox", + "value": "0: (2)", + "info": "" + }, + "enable_noise_multiplier_scheduling": { + "label": "Enable noise multiplier scheduling", + "type": "checkbox", + "value": True, + "info": "" + }, + "noise_multiplier_schedule": { + "label": "Noise multiplier schedule", + "type": "textbox", + "value": "0: (1.05)", + "info": "" + }, + "resume_from_timestring": { + "label": "Resume from timestring", + "type": "checkbox", + "value": False, + "info": "" + }, + "resume_timestring": { + "label": "Resume timestring", + "type": "textbox", + "value": "20230129210106", + "info": "" + }, + "enable_ddim_eta_scheduling": { + "label": "Enable DDIM ETA scheduling", + "type": "checkbox", + "value": False, + "visible": False, + "info": "noise multiplier; higher = more unpredictable results" + }, + "ddim_eta_schedule": { + "label": "DDIM ETA Schedule", + "type": "textbox", + "value": "0: (0)", + "visible": False, + "info": "" + }, + "enable_ancestral_eta_scheduling": { + "label": "Enable Ancestral ETA scheduling", + "type": "checkbox", + "value": False, + "info": "noise multiplier; applies to Euler A and other samplers that have the letter 'a' in them" + }, + "ancestral_eta_schedule": { + "label": "Ancestral ETA Schedule", + "type": "textbox", + "value": "0: (1)", + "visible": False, + "info": "" + }, + "amount_schedule": { + "label": "Amount schedule", + "type": "textbox", + "value": "0: (0.1)", + "info": "" + }, + "kernel_schedule": { + "label": "Kernel schedule", + "type": "textbox", + "value": "0: (5)", + "info": "" + }, + "sigma_schedule": { + "label": "Sigma schedule", + "type": "textbox", + "value": "0: (1)", + "info": "" + }, + "threshold_schedule": { + "label": "Threshold schedule", + "type": "textbox", + "value": "0: (0)", + "info": "" + }, + "color_coherence": { + "label": "Color coherence", + "type": "dropdown", + "choices": ['None', 'HSV', 'LAB', 'RGB', 'Video Input', 'Image'], + "value": "LAB", + "info": "choose an algorithm/ method for keeping color coherence across the animation" + }, + "color_coherence_image_path": { + "label": "Color coherence image path", + "type": "textbox", + "value": "", + "info": "" + }, + "color_coherence_video_every_N_frames": { + "label": "Color coherence video every N frames", + "type": "number", + "precision": 0, + "value": 1, + "info": "", + }, + "color_force_grayscale": { + "label": "Color force Grayscale", + "type": "checkbox", + "value": False, + "info": "force all frames to be in grayscale" + }, + "legacy_colormatch": { + "label": "Legacy colormatch", + "type": "checkbox", + "value": False, + "info": "apply colormatch before adding noise (use with CN's Tile)" + }, + "diffusion_cadence": { + "label": "Cadence", + "type": "slider", + "minimum": 1, + "maximum": 50, + "step": 1, + "value": 1, + "info": "# of in-between frames that will not be directly diffused" + }, + "optical_flow_cadence": { + "label": "Optical flow cadence", + "type": "dropdown", + "choices": ['None', 'RAFT', 'DIS Medium', 'DIS Fine', 'Farneback'], + "value": "None", + "info": "use optical flow estimation for your in-between (cadence) frames" + }, + "cadence_flow_factor_schedule": { + "label": "Cadence flow factor schedule", + "type": "textbox", + "value": "0: (1)", + "info": "" + }, + "optical_flow_redo_generation": { + "label": "Optical flow 
generation", + "type": "dropdown", + "choices": ['None', 'RAFT', 'DIS Medium', 'DIS Fine', 'Farneback'], + "value": "None", + "info": "this option takes twice as long because it generates twice in order to capture the optical flow from the previous image to the first generation, then warps the previous image and redoes the generation" + }, + "redo_flow_factor_schedule": { + "label": "Generation flow factor schedule", + "type": "textbox", + "value": "0: (1)", + "info": "" + }, + "diffusion_redo": '0', + "noise_type": { + "label": "Noise type", + "type": "radio", + "choices": ['uniform', 'perlin'], + "value": "perlin", + "info": "" + }, + "perlin_w": { + "label": "Perlin W", + "type": "slider", + "minimum": 0.1, + "maximum": 16, + "step": 0.1, + "value": 8, + "visible": False + }, + "perlin_h": { + "label": "Perlin H", + "type": "slider", + "minimum": 0.1, + "maximum": 16, + "step": 0.1, + "value": 8, + "visible": False + }, + "perlin_octaves": { + "label": "Perlin octaves", + "type": "slider", + "minimum": 1, + "maximum": 7, + "step": 1, + "value": 4 + }, + "perlin_persistence": { + "label": "Perlin persistence", + "type": "slider", + "minimum": 0, + "maximum": 1, + "step": 0.02, + "value": 0.5 + }, + "use_depth_warping": { + "label": "Use depth warping", + "type": "checkbox", + "value": True, + "info": "" + }, + "depth_algorithm": { + "label": "Depth Algorithm", + "type": "dropdown", + "choices": ['Midas+AdaBins (old)', 'Zoe+AdaBins (old)', 'Midas-3-Hybrid', 'AdaBins', 'Zoe', 'Leres'], + "value": "Midas-3-Hybrid", + "info": "choose an algorithm/ method for keeping color coherence across the animation" + }, + "midas_weight": { + "label": "MiDaS/Zoe weight", + "type": "number", + "precision": None, + "value": 0.2, + "info": "sets a midpoint at which a depth-map is to be drawn: range [-1 to +1]", + "visible": False + }, + "padding_mode": { + "label": "Padding mode", + "type": "radio", + "choices": ['border', 'reflection', 'zeros'], + "value": "border", + "info": "controls the handling of pixels outside the field of view as they come into the scene" + }, + "sampling_mode": { + "label": "Padding mode", + "type": "radio", + "choices": ['bicubic', 'bilinear', 'nearest'], + "value": "bicubic", + "info": "" + }, + "save_depth_maps": { + "label": "Save 3D depth maps", + "type": "checkbox", + "value": False, + "info": "save animation's depth maps as extra files" + }, + "video_init_path": { + "label": "Video init path/ URL", + "type": "textbox", + "value": 'https://deforum.github.io/a1/V1.mp4', + "info": "" + }, + "extract_nth_frame": { + "label": "Extract nth frame", + "type": "number", + "precision": 0, + "value": 1, + "info": "" + }, + "extract_from_frame": { + "label": "Extract from frame", + "type": "number", + "precision": 0, + "value": 0, + "info": "" + }, + "extract_to_frame": { + "label": "Extract to frame", + "type": "number", + "precision": 0, + "value": -1, + "info": "" + }, + "overwrite_extracted_frames": { + "label": "Overwrite extracted frames", + "type": "checkbox", + "value": False, + "info": "" + }, + "use_mask_video": { + "label": "Use mask video", + "type": "checkbox", + "value": False, + "info": "" + }, + "video_mask_path": { + "label": "Video mask path", + "type": "textbox", + "value": 'https://deforum.github.io/a1/VM1.mp4', + "info": "" + }, + "hybrid_comp_alpha_schedule": { + "label": "Comp alpha schedule", + "type": "textbox", + "value": "0:(0.5)", + "info": "" + }, + "hybrid_comp_mask_blend_alpha_schedule": { + "label": "Comp mask blend alpha schedule", + "type": "textbox", + 
"value": "0:(0.5)", + "info": "" + }, + "hybrid_comp_mask_contrast_schedule": { + "label": "Comp mask contrast schedule", + "type": "textbox", + "value": "0:(1)", + "info": "" + }, + "hybrid_comp_mask_auto_contrast_cutoff_high_schedule": { + "label": "Comp mask auto contrast cutoff high schedule", + "type": "textbox", + "value": "0:(100)", + "info": "" + }, + "hybrid_comp_mask_auto_contrast_cutoff_low_schedule": { + "label": "Comp mask auto contrast cutoff low schedule", + "type": "textbox", + "value": "0:(0)", + "info": "" + }, + "hybrid_flow_factor_schedule": { + "label": "Flow factor schedule", + "type": "textbox", + "value": "0:(1)", + "info": "" + }, + "hybrid_generate_inputframes": { + "label": "Generate inputframes", + "type": "checkbox", + "value": False, + "info": "" + }, + "hybrid_generate_human_masks": { + "label": "Generate human masks", + "type": "radio", + "choices": ['None', 'PNGs', 'Video', 'Both'], + "value": "None", + "info": "" + }, + "hybrid_use_first_frame_as_init_image": { + "label": "First frame as init image", + "type": "checkbox", + "value": True, + "info": "", + "visible": False + }, + "hybrid_motion": { + "label": "Hybrid motion", + "type": "radio", + "choices": ['None', 'Optical Flow', 'Perspective', 'Affine'], + "value": "None", + "info": "" + }, + "hybrid_motion_use_prev_img": { + "label": "Motion use prev img", + "type": "checkbox", + "value": False, + "info": "", + "visible": False + }, + "hybrid_flow_consistency": { + "label": "Flow consistency mask", + "type": "checkbox", + "value": False, + "info": "", + "visible": False + }, + "hybrid_consistency_blur": { + "label": "Consistency mask blur", + "type": "slider", + "minimum": 0, + "maximum": 16, + "step": 1, + "value": 2, + "visible": False + }, + "hybrid_flow_method": { + "label": "Flow method", + "type": "radio", + "choices": ['RAFT', 'DIS Medium', 'DIS Fine', 'Farneback'], + "value": "RAFT", + "info": "", + "visible": False + }, + "hybrid_composite": 'None', # ['None', 'Normal', 'Before Motion', 'After Generation'] + "hybrid_use_init_image": { + "label": "Use init image as video", + "type": "checkbox", + "value": False, + "info": "", + }, + "hybrid_comp_mask_type": { + "label": "Comp mask type", + "type": "radio", + "choices": ['None', 'Depth', 'Video Depth', 'Blend', 'Difference'], + "value": "None", + "info": "", + "visible": False + }, + "hybrid_comp_mask_inverse": False, + "hybrid_comp_mask_equalize": { + "label": "Comp mask equalize", + "type": "radio", + "choices": ['None', 'Before', 'After', 'Both'], + "value": "None", + "info": "", + }, + "hybrid_comp_mask_auto_contrast": False, + "hybrid_comp_save_extra_frames": False + } + +def DeforumArgs(): + return { + "W": { + "label": "Width", + "type": "slider", + "minimum": 64, + "maximum": 2048, + "step": 64, + "value": 512, + }, + "H": { + "label": "Height", + "type": "slider", + "minimum": 64, + "maximum": 2048, + "step": 64, + "value": 512, + }, + "show_info_on_ui": True, + "tiling": { + "label": "Tiling", + "type": "checkbox", + "value": False, + "info": "enable for seamless-tiling of each generated image. 
Experimental" + }, + "restore_faces": { + "label": "Restore faces", + "type": "checkbox", + "value": False, + "info": "enable to trigger webui's face restoration on each frame during the generation" + }, + "seed_resize_from_w": { + "label": "Resize seed from width", + "type": "slider", + "minimum": 0, + "maximum": 2048, + "step": 64, + "value": 0, + }, + "seed_resize_from_h": { + "label": "Resize seed from height", + "type": "slider", + "minimum": 0, + "maximum": 2048, + "step": 64, + "value": 0, + }, + "seed": { + "label": "Seed", + "type": "number", + "precision": 0, + "value": -1, + "info": "Starting seed for the animation. -1 for random" + }, + "sampler": { + "label": "Sampler", + "type": "dropdown", + "choices": get_samplers_list().values(), + "value": "Euler a", + }, + "steps": { + "label": "Steps", + "type": "slider", + "minimum": 1, + "maximum": 200, + "step": 1, + "value": 25, + }, + "batch_name": { + "label": "Batch name", + "type": "textbox", + "value": "Deforum_{timestring}", + "info": "output images will be placed in a folder with this name ({timestring} token will be replaced) inside the img2img output folder. Supports params placeholders. e.g {seed}, {w}, {h}, {prompts}" + }, + "seed_behavior": { + "label": "Seed behavior", + "type": "radio", + "choices": ['iter', 'fixed', 'random', 'ladder', 'alternate', 'schedule'], + "value": "iter", + "info": "controls the seed behavior that is used for animation. Hover on the options to see more info" + }, + "seed_iter_N": { + "label": "Seed iter N", + "type": "number", + "precision": 0, + "value": 1, + "info": "for how many frames the same seed should stick before iterating to the next one" + }, + "use_init": { + "label": "Use init", + "type": "checkbox", + "value": False, + "info": "" + }, + "strength": { + "label": "strength", + "type": "slider", + "minimum": 0, + "maximum": 1, + "step": 0.01, + "value": 0.8, + }, + "strength_0_no_init": { + "label": "Strength 0 no init", + "type": "checkbox", + "value": True, + "info": "" + }, + "init_image": { + "label": "Init image URL", + "type": "textbox", + "value": "https://deforum.github.io/a1/I1.png", + "info": "Use web address or local path. Note: if the image box below is used then this field is ignored." 
+ }, + "init_image_box": { + "label": "Init image box", + "type": "image", + "type_param": "pil", + "source": "upload", + "interactive": True, + "info": "" + }, + "use_mask": { + "label": "Use mask", + "type": "checkbox", + "value": False, + "info": "" + }, + "use_alpha_as_mask": { + "label": "Use alpha as mask", + "type": "checkbox", + "value": False, + "info": "" + }, + "mask_file": { + "label": "Mask file", + "type": "textbox", + "value": "https://deforum.github.io/a1/M1.jpg", + "info": "" + }, + "invert_mask": { + "label": "Invert mask", + "type": "checkbox", + "value": False, + "info": "" + }, + "mask_contrast_adjust": { + "label": "Mask contrast adjust", + "type": "number", + "precision": None, + "value": 1.0, + "info": "" + }, + "mask_brightness_adjust": { + "label": "Mask brightness adjust", + "type": "number", + "precision": None, + "value": 1.0, + "info": "" + }, + "overlay_mask": { + "label": "Overlay mask", + "type": "checkbox", + "value": True, + "info": "" + }, + "mask_overlay_blur": { + "label": "Mask overlay blur", + "type": "slider", + "minimum": 0, + "maximum": 64, + "step": 1, + "value": 4, + }, + "fill": { + "label": "Mask fill", + "type": "radio", + "type_param": "index", + "choices": ['fill', 'original', 'latent noise', 'latent nothing'], + "value": 'original', + "info": "" + }, + "full_res_mask": { + "label": "Full res mask", + "type": "checkbox", + "value": True, + "info": "" + }, + "full_res_mask_padding": { + "label": "Full res mask padding", + "type": "slider", + "minimum": 0, + "maximum": 512, + "step": 1, + "value": 4, + }, + "reroll_blank_frames": { + "label": "Reroll blank frames", + "type": "radio", + "choices": ['reroll', 'interrupt', 'ignore'], + "value": "ignore", + "info": "" + }, + "reroll_patience": { + "label": "Reroll patience", + "type": "number", + "precision": None, + "value": 10, + "info": "" + }, + "motion_preview_mode": { + "label": "Motion preview mode (dry run).", + "type": "checkbox", + "value": False, + "info": "Preview motion only. Uses a static picture for init, and draw motion reference rectangle." 
+ }, + } + +def LoopArgs(): + return { + "use_looper": { + "label": "Enable guided images mode", + "type": "checkbox", + "value": False, + }, + "init_images": { + "label": "Images to use for keyframe guidance", + "type": "textbox", + "lines": 9, + "value": get_guided_imgs_default_json(), + }, + "image_strength_schedule": { + "label": "Image strength schedule", + "type": "textbox", + "value": "0:(0.75)", + }, + "blendFactorMax": { + "label": "Blend factor max", + "type": "textbox", + "value": "0:(0.35)", + }, + "blendFactorSlope": { + "label": "Blend factor slope", + "type": "textbox", + "value": "0:(0.25)", + }, + "tweening_frames_schedule": { + "label": "Tweening frames schedule", + "type": "textbox", + "value": "0:(20)", + }, + "color_correction_factor": { + "label": "Color correction factor", + "type": "textbox", + "value": "0:(0.075)", + } + } + +def ParseqArgs(): + return { + "parseq_manifest": { + "label": "Parseq Manifest (JSON or URL)", + "type": "textbox", + "lines": 4, + "value": None, + }, + "parseq_use_deltas": { + "label": "Use delta values for movement parameters", + "type": "checkbox", + "value": True, + } + } + +def DeforumOutputArgs(): + return { + "skip_video_creation": { + "label": "Skip video creation", + "type": "checkbox", + "value": False, + "info": "If enabled, only images will be saved" + }, + "fps": { + "label": "FPS", + "type": "slider", + "minimum": 1, + "maximum": 240, + "step": 1, + "value": 15, + }, + "make_gif": { + "label": "Make GIF", + "type": "checkbox", + "value": False, + "info": "make GIF in addition to the video/s" + }, + "delete_imgs": { + "label": "Delete Imgs", + "type": "checkbox", + "value": False, + "info": "auto-delete imgs when video is ready. Will break Resume from timestring!" + }, + "delete_input_frames": { + "label": "Delete All Inputframes", + "type": "checkbox", + "value": False, + "info": "auto-delete inputframes (incl CN ones) when video is ready" + }, + "image_path": { + "label": "Image path", + "type": "textbox", + "value": "C:/SD/20230124234916_%09d.png", + }, + "add_soundtrack": { + "label": "Add soundtrack", + "type": "radio", + "choices": ['None', 'File', 'Init Video'], + "value": "None", + "info": "add audio to video from file/url or init video" + }, + "soundtrack_path": { + "label": "Soundtrack path", + "type": "textbox", + "value": "https://deforum.github.io/a1/A1.mp3", + "info": "abs. path or url to audio file" + }, + "r_upscale_video": { + "label": "Upscale", + "type": "checkbox", + "value": False, + "info": "upscale output imgs when run is finished" + }, + "r_upscale_factor": { + "label": "Upscale factor", + "type": "dropdown", + "choices": ['x2', 'x3', 'x4'], + "value": "x2", + }, + "r_upscale_model": { + "label": "Upscale model", + "type": "dropdown", + "choices": ['realesr-animevideov3', 'realesrgan-x4plus', 'realesrgan-x4plus-anime'], + "value": 'realesr-animevideov3', + }, + "r_upscale_keep_imgs": { + "label": "Keep Imgs", + "type": "checkbox", + "value": True, + "info": "don't delete upscaled imgs", + }, + "store_frames_in_ram": { + "label": "Store frames in ram", + "type": "checkbox", + "value": False, + "info": "auto-delete imgs when video is ready", + "visible": False + }, + "frame_interpolation_engine": { + "label": "Engine", + "type": "radio", + "choices": ['None', 'RIFE v4.6', 'FILM'], + "value": "None", + "info": "select the frame interpolation engine. 
hover on the options for more info" + }, + "frame_interpolation_x_amount": { + "label": "Interp X", + "type": "slider", + "minimum": 2, + "maximum": 10, + "step": 1, + "value": 2, + }, + "frame_interpolation_slow_mo_enabled": { + "label": "Slow-Mo", + "type": "checkbox", + "value": False, + "visible": False, + "info": "Slow-Mo the interpolated video, audio will not be used if enabled", + }, + "frame_interpolation_slow_mo_amount": { + "label": "Slow-Mo X", + "type": "slider", + "minimum": 2, + "maximum": 10, + "step": 1, + "value": 2, + }, + "frame_interpolation_keep_imgs": { + "label": "Keep Imgs", + "type": "checkbox", + "value": False, + "info": "Keep interpolated images on disk", + "visible": False + }, + "frame_interpolation_use_upscaled": { + "label": "Use Upscaled", + "type": "checkbox", + "value": False, + "info": "Interpolate upscaled images, if available", + "visible": False + }, + + } + +def get_component_names(): + return ['override_settings_with_file', 'custom_settings_file', *DeforumAnimArgs().keys(), 'animation_prompts', 'animation_prompts_positive', 'animation_prompts_negative', + *DeforumArgs().keys(), *DeforumOutputArgs().keys(), *ParseqArgs().keys(), *LoopArgs().keys(), *controlnet_component_names()] + +def get_settings_component_names(): + return [name for name in get_component_names()] + +def pack_args(args_dict, keys_function): + return {name: args_dict[name] for name in keys_function()} + +def process_args(args_dict_main, run_id): + from .settings import load_args + override_settings_with_file = args_dict_main['override_settings_with_file'] + custom_settings_file = args_dict_main['custom_settings_file'] + p = args_dict_main['p'] + + root = SimpleNamespace(**RootArgs()) + args = SimpleNamespace(**{name: args_dict_main[name] for name in DeforumArgs()}) + anim_args = SimpleNamespace(**{name: args_dict_main[name] for name in DeforumAnimArgs()}) + video_args = SimpleNamespace(**{name: args_dict_main[name] for name in DeforumOutputArgs()}) + parseq_args = SimpleNamespace(**{name: args_dict_main[name] for name in ParseqArgs()}) + loop_args = SimpleNamespace(**{name: args_dict_main[name] for name in LoopArgs()}) + controlnet_args = SimpleNamespace(**{name: args_dict_main[name] for name in controlnet_component_names()}) + + root.animation_prompts = json.loads(args_dict_main['animation_prompts']) + + args_loaded_ok = True + if override_settings_with_file: + args_loaded_ok = load_args(args_dict_main, args, anim_args, parseq_args, loop_args, controlnet_args, video_args, custom_settings_file, root, run_id) + + positive_prompts = args_dict_main['animation_prompts_positive'] + negative_prompts = args_dict_main['animation_prompts_negative'] + negative_prompts = negative_prompts.replace('--neg', '') # remove --neg from negative_prompts if received by mistake + root.animation_prompts = {key: f"{positive_prompts} {val} {'' if '--neg' in val else '--neg'} {negative_prompts}" for key, val in root.animation_prompts.items()} + + if args.seed == -1: + root.raw_seed = -1 + args.seed = get_fixed_seed(args.seed) + if root.raw_seed != -1: + root.raw_seed = args.seed + root.timestring = time.strftime('%Y%m%d%H%M%S') + args.strength = max(0.0, min(1.0, args.strength)) + args.prompts = json.loads(args_dict_main['animation_prompts']) + args.positive_prompts = args_dict_main['animation_prompts_positive'] + args.negative_prompts = args_dict_main['animation_prompts_negative'] + + if not args.use_init and not anim_args.hybrid_use_init_image: + args.init_image = None + args.init_image_box = None + + elif 
anim_args.animation_mode == 'Video Input': + args.use_init = True + + current_arg_list = [args, anim_args, video_args, parseq_args, root] + full_base_folder_path = os.path.join(os.getcwd(), p.outpath_samples) + root.raw_batch_name = args.batch_name + args.batch_name = substitute_placeholders(args.batch_name, current_arg_list, full_base_folder_path) + args.outdir = os.path.join(p.outpath_samples, str(args.batch_name)) + args.outdir = os.path.join(os.getcwd(), args.outdir) + args.outdir = os.path.realpath(args.outdir) + os.makedirs(args.outdir, exist_ok=True) + + default_img = Image.open(os.path.join(pathlib.Path(__file__).parent.absolute(), '114763196.jpg')) + assert default_img is not None + default_img = default_img.resize((args.W,args.H)) + root.default_img = default_img + + return args_loaded_ok, root, args, anim_args, video_args, parseq_args, loop_args, controlnet_args diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/auto_navigation.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/auto_navigation.py new file mode 100644 index 0000000000000000000000000000000000000000..4d9b84cf7d5f3f92eceb0ac5778dadeeb39a8cdb --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/auto_navigation.py @@ -0,0 +1,88 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +# Contact the authors: https://deforum.github.io/ + +import numpy as np +import torch + +# reallybigname - auto-navigation functions in progress... 
+# usage: +# if auto_rotation: +# rot_mat = rotate_camera_towards_depth(depth_tensor, auto_rotation_steps, w, h, fov_deg, auto_rotation_depth_target) +def rotate_camera_towards_depth(depth_tensor, turn_weight, width, height, h_fov=60, target_depth=1): + # Compute the depth at the target depth + target_depth_index = int(target_depth * depth_tensor.shape[0]) + target_depth_values = depth_tensor[target_depth_index] + max_depth_index = torch.argmax(target_depth_values).item() + max_depth_index = (max_depth_index, target_depth_index) + max_depth = target_depth_values[max_depth_index[0]].item() + + # Compute the normalized x and y coordinates + x, y = max_depth_index + x_normalized = (x / (width - 1)) * 2 - 1 + y_normalized = (y / (height - 1)) * 2 - 1 + + # Calculate horizontal and vertical field of view (in radians) + h_fov_rad = np.radians(h_fov) + aspect_ratio = width / height + v_fov_rad = h_fov_rad / aspect_ratio + + # Calculate the world coordinates (x, y) at the target depth + x_world = np.tan(h_fov_rad / 2) * max_depth * x_normalized + y_world = np.tan(v_fov_rad / 2) * max_depth * y_normalized + + # Compute the target position using the world coordinates and max_depth + target_position = np.array([x_world, y_world, max_depth]) + + # Assuming the camera is initially at the origin, and looking in the negative Z direction + cam_position = np.array([0, 0, 0]) + current_direction = np.array([0, 0, -1]) + + # Compute the direction vector and normalize it + direction = target_position - cam_position + direction = direction / np.linalg.norm(direction) + + # Compute the rotation angle based on the turn_weight (number of frames) + axis = np.cross(current_direction, direction) + axis = axis / np.linalg.norm(axis) + angle = np.arcsin(np.linalg.norm(axis)) + max_angle = np.pi * (0.1 / turn_weight) # Limit the maximum rotation angle to half of the visible screen + rotation_angle = np.clip(np.sign(np.cross(current_direction, direction)) * angle / turn_weight, -max_angle, max_angle) + + # Compute the rotation matrix + rotation_matrix = np.eye(3) + np.sin(rotation_angle) * np.array([ + [0, -axis[2], axis[1]], + [axis[2], 0, -axis[0]], + [-axis[1], axis[0], 0] + ]) + (1 - np.cos(rotation_angle)) * np.outer(axis, axis) + + # Convert the NumPy array to a PyTorch tensor + rotation_matrix_tensor = torch.from_numpy(rotation_matrix).float() + + # Add an extra dimension to match the expected shape (1, 3, 3) + rotation_matrix_tensor = rotation_matrix_tensor.unsqueeze(0) + + return rotation_matrix_tensor + +def rotation_matrix(axis, angle): + axis = np.asarray(axis) + axis = axis / np.linalg.norm(axis) + a = np.cos(angle / 2.0) + b, c, d = -axis * np.sin(angle / 2.0) + aa, bb, cc, dd = a * a, b * b, c * c, d * d + bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d + return np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)], + [2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)], + [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]]) diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/colors.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/colors.py new file mode 100644 index 0000000000000000000000000000000000000000..b95d9384a1888d69d09b774387097999724d65fc --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/colors.py @@ -0,0 +1,36 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free 
Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +# Contact the authors: https://deforum.github.io/ + +import cv2 +import pkg_resources +from skimage.exposure import match_histograms + +def maintain_colors(prev_img, color_match_sample, mode): + + match_histograms_kwargs = {'channel_axis': -1} + + if mode == 'RGB': + return match_histograms(prev_img, color_match_sample, **match_histograms_kwargs) + elif mode == 'HSV': + prev_img_hsv = cv2.cvtColor(prev_img, cv2.COLOR_RGB2HSV) + color_match_hsv = cv2.cvtColor(color_match_sample, cv2.COLOR_RGB2HSV) + matched_hsv = match_histograms(prev_img_hsv, color_match_hsv, **match_histograms_kwargs) + return cv2.cvtColor(matched_hsv, cv2.COLOR_HSV2RGB) + else: # LAB + prev_img_lab = cv2.cvtColor(prev_img, cv2.COLOR_RGB2LAB) + color_match_lab = cv2.cvtColor(color_match_sample, cv2.COLOR_RGB2LAB) + matched_lab = match_histograms(prev_img_lab, color_match_lab, **match_histograms_kwargs) + return cv2.cvtColor(matched_lab, cv2.COLOR_LAB2RGB) diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/composable_masks.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/composable_masks.py new file mode 100644 index 0000000000000000000000000000000000000000..368758368dbcab30d57e0d38348b99db83689750 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/composable_masks.py @@ -0,0 +1,212 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
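The maintain_colors helper above keeps the palette stable across frames by histogram-matching each new frame against a reference sample, optionally in HSV or LAB space. A hedged usage sketch with synthetic frames (random arrays standing in for real rendered frames):

```python
import cv2
import numpy as np
from skimage.exposure import match_histograms

# Stand-ins for the previous frame and the colour reference sample (HxWx3, uint8, RGB).
prev_img = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)
color_match_sample = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)

# 'LAB' mode as in maintain_colors: match histograms in LAB space, then convert back to RGB.
prev_lab = cv2.cvtColor(prev_img, cv2.COLOR_RGB2LAB)
ref_lab = cv2.cvtColor(color_match_sample, cv2.COLOR_RGB2LAB)
matched_lab = match_histograms(prev_lab, ref_lab, channel_axis=-1)
result = cv2.cvtColor(matched_lab.astype(np.uint8), cv2.COLOR_LAB2RGB)
print(result.shape, result.dtype)
```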
+ +# Contact the authors: https://deforum.github.io/ + +# At the moment there are three types of masks: mask from variable, file mask and word mask +# Variable masks include video_mask (which can be set to auto-generated human masks) and everywhere +# They are put in {}-brackets +# Word masks are framed with <>-bracets, like: , +# File masks are put in []-brackes +# Empty strings are counted as the whole frame +# We want to put them all into a sequence of boolean operations + +# Example: +# \ +# (({human_mask} & [mask1.png]) ^ ) + +# Writing the parser for the boolean sequence +# using regex and PIL operations +import re +from .load_images import get_mask_from_file, check_mask_for_errors, blank_if_none +from .word_masking import get_word_mask +from PIL import ImageChops +from modules.shared import opts + +# val_masks: name, PIL Image mask +# Returns an image in mode '1' (needed for bool ops), convert to 'L' in the sender function +def compose_mask(root, args, mask_seq, val_masks, frame_image, inner_idx:int = 0): + # Compose_mask recursively: go to inner brackets, then b-op it and go upstack + + # Step 1: + # recursive parenthesis pass + # regex is not powerful here + + seq = "" + inner_seq = "" + parentheses_counter = 0 + + for c in mask_seq: + if c == ')': + parentheses_counter = parentheses_counter - 1 + if parentheses_counter > 0: + inner_seq += c + if c == '(': + parentheses_counter = parentheses_counter + 1 + if parentheses_counter == 0: + if len(inner_seq) > 0: + inner_idx += 1 + seq += compose_mask(root, args, inner_seq, val_masks, frame_image, inner_idx) + inner_seq = "" + else: + seq += c + + if parentheses_counter != 0: + raise Exception('Mismatched parentheses in {mask_seq}!') + + mask_seq = seq + + # Step 2: + # Load the word masks and file masks as vars + + # File masks + pattern = r'\[(?P[\S\s]*?)\]' + + def parse(match_object): + nonlocal inner_idx + inner_idx += 1 + content = match_object.groupdict()['inner'] + val_masks[str(inner_idx)] = get_mask_from_file(content, args).convert('1') # TODO: add caching + return f"{{{inner_idx}}}" + + mask_seq = re.sub(pattern, parse, mask_seq) + + # Word masks + pattern = r'<(?P[\S\s]*?)>' + + def parse(match_object): + nonlocal inner_idx + inner_idx += 1 + content = match_object.groupdict()['inner'] + val_masks[str(inner_idx)] = get_word_mask(root, frame_image, content).convert('1') + return f"{{{inner_idx}}}" + + mask_seq = re.sub(pattern, parse, mask_seq) + + # Now that all inner parenthesis are eliminated we're left with a linear string + + # Step 3: + # Boolean operations with masks + # Operators: invert !, and &, or |, xor ^, difference \ + + # Invert vars with '!' 
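The operator passes that follow rewrite the mask expression pair by pair using PIL's bitwise image operations. A minimal standalone sketch of those primitives on mode-'1' images (toy stand-in masks, not the val_masks dictionary used above):

```python
from PIL import Image, ImageChops

# Two toy binary masks standing in for e.g. {video_mask} and [mask1.png].
a = Image.new("1", (64, 64), 1)
b = Image.new("1", (64, 64), 0)

inverted   = ImageChops.invert(a)                             # '!'  invert
both       = ImageChops.logical_and(a, b)                     # '&'  intersection
either     = ImageChops.logical_or(a, b)                      # '|'  union
exclusive  = ImageChops.logical_xor(a, b)                     # '^'  symmetric difference
difference = ImageChops.logical_and(a, ImageChops.invert(b))  # '\'  set difference
print(difference.mode, difference.size)
```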
+ pattern = r'![\S\s]*{(?P[\S\s]*?)}' + def parse(match_object): + nonlocal inner_idx + inner_idx += 1 + content = match_object.groupdict()['inner'] + savename = content + if content in root.mask_preset_names: + inner_idx += 1 + savename = str(inner_idx) + val_masks[savename] = ImageChops.invert(val_masks[content]) + return f"{{{savename}}}" + + mask_seq = re.sub(pattern, parse, mask_seq) + + # Multiply neighbouring vars with '&' + # Wait for replacements stall (like in Markov chains) + while True: + pattern = r'{(?P[\S\s]*?)}[\s]*&[\s]*{(?P[\S\s]*?)}' + def parse(match_object): + nonlocal inner_idx + inner_idx += 1 + content = match_object.groupdict()['inner1'] + content_second = match_object.groupdict()['inner2'] + savename = content + if content in root.mask_preset_names: + inner_idx += 1 + savename = str(inner_idx) + val_masks[savename] = ImageChops.logical_and(val_masks[content], val_masks[content_second]) + return f"{{{savename}}}" + + prev_mask_seq = mask_seq + mask_seq = re.sub(pattern, parse, mask_seq) + if mask_seq is prev_mask_seq: + break + + # Add neighbouring vars with '|' + while True: + pattern = r'{(?P[\S\s]*?)}[\s]*?\|[\s]*?{(?P[\S\s]*?)}' + def parse(match_object): + nonlocal inner_idx + inner_idx += 1 + content = match_object.groupdict()['inner1'] + content_second = match_object.groupdict()['inner2'] + savename = content + if content in root.mask_preset_names: + inner_idx += 1 + savename = str(inner_idx) + val_masks[savename] = ImageChops.logical_or(val_masks[content], val_masks[content_second]) + return f"{{{savename}}}" + + prev_mask_seq = mask_seq + mask_seq = re.sub(pattern, parse, mask_seq) + if mask_seq is prev_mask_seq: + break + + # Mutually exclude neighbouring vars with '^' + while True: + pattern = r'{(?P[\S\s]*?)}[\s]*\^[\s]*{(?P[\S\s]*?)}' + def parse(match_object): + nonlocal inner_idx + inner_idx += 1 + content = match_object.groupdict()['inner1'] + content_second = match_object.groupdict()['inner2'] + savename = content + if content in root.mask_preset_names: + inner_idx += 1 + savename = str(inner_idx) + val_masks[savename] = ImageChops.logical_xor(val_masks[content], val_masks[content_second]) + return f"{{{savename}}}" + + prev_mask_seq = mask_seq + mask_seq = re.sub(pattern, parse, mask_seq) + if mask_seq is prev_mask_seq: + break + + # Set-difference the regions with '\' + while True: + pattern = r'{(?P[\S\s]*?)}[\s]*\\[\s]*{(?P[\S\s]*?)}' + def parse(match_object): + content = match_object.groupdict()['inner1'] + content_second = match_object.groupdict()['inner2'] + savename = content + if content in root.mask_preset_names: + nonlocal inner_idx + inner_idx += 1 + savename = str(inner_idx) + val_masks[savename] = ImageChops.logical_and(val_masks[content], ImageChops.invert(val_masks[content_second])) + return f"{{{savename}}}" + + prev_mask_seq = mask_seq + mask_seq = re.sub(pattern, parse, mask_seq) + if mask_seq is prev_mask_seq: + break + + # Step 4: + # Output + # Now we should have a single var left to return. If not, raise an error message + pattern = r'{(?P[\S\s]*?)}' + matches = re.findall(pattern, mask_seq) + + if len(matches) != 1: + raise Exception(f'Wrong composable mask expression format! 
Broken mask sequence: {mask_seq}') + + return f"{{{matches[0]}}}" + +def compose_mask_with_check(root, args, mask_seq, val_masks, frame_image): + for k, v in val_masks.items(): + val_masks[k] = blank_if_none(v, args.W, args.H, '1').convert('1') + return check_mask_for_errors(val_masks[compose_mask(root, args, mask_seq, val_masks, frame_image, 0)[1:-1]].convert('L')) diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/consistency_check.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/consistency_check.py new file mode 100644 index 0000000000000000000000000000000000000000..fac194e986bca855a1113c1fb24affd37c06c994 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/consistency_check.py @@ -0,0 +1,148 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +# Contact the authors: https://deforum.github.io/ + +''' +Taken from https://github.com/Sxela/flow_tools/blob/main (GNU GPL Licensed), and modified to suit Deforum +''' +# import argparse +# import PIL.Image +import numpy as np +# import scipy.ndimage +# import glob +# from tqdm import tqdm + +def make_consistency(flow1, flow2, edges_unreliable=False): + # Awesome pythonic consistency check from [maua](https://github.com/maua-maua-maua/maua/blob/44485c745c65cf9d83cb1b1c792a177588e9c9fc/maua/flow/consistency.py) by Hans Brouwer and Henry Rachootin + # algorithm based on https://github.com/manuelruder/artistic-videos/blob/master/consistencyChecker/consistencyChecker.cpp + # reimplemented in numpy by Hans Brouwer + # // consistencyChecker + # // Check consistency of forward flow via backward flow. 
+ # // (c) Manuel Ruder, Alexey Dosovitskiy, Thomas Brox 2016 + + flow1 = np.flip(flow1, axis=2) + flow2 = np.flip(flow2, axis=2) + h, w, _ = flow1.shape + + # get grid of coordinates for each pixel + orig_coord = np.flip(np.mgrid[:w, :h], 0).T + + # find where the flow1 maps each pixel + warp_coord = orig_coord + flow1 + + # clip the coordinates in bounds and round down + warp_coord_inbound = np.zeros_like(warp_coord) + warp_coord_inbound[..., 0] = np.clip(warp_coord[..., 0], 0, h - 2) + warp_coord_inbound[..., 1] = np.clip(warp_coord[..., 1], 0, w - 2) + warp_coord_floor = np.floor(warp_coord_inbound).astype(int) + + # for each pixel: bilinear interpolation of the corresponding flow2 values around the point mapped to by flow1 + alpha = warp_coord_inbound - warp_coord_floor + flow2_00 = flow2[warp_coord_floor[..., 0], warp_coord_floor[..., 1]] + flow2_01 = flow2[warp_coord_floor[..., 0], warp_coord_floor[..., 1] + 1] + flow2_10 = flow2[warp_coord_floor[..., 0] + 1, warp_coord_floor[..., 1]] + flow2_11 = flow2[warp_coord_floor[..., 0] + 1, warp_coord_floor[..., 1] + 1] + flow2_0_blend = (1 - alpha[..., 1, None]) * flow2_00 + alpha[..., 1, None] * flow2_01 + flow2_1_blend = (1 - alpha[..., 1, None]) * flow2_10 + alpha[..., 1, None] * flow2_11 + warp_coord_flow2 = (1 - alpha[..., 0, None]) * flow2_0_blend + alpha[..., 0, None] * flow2_1_blend + + # coordinates that flow2 remaps each flow1-mapped pixel to + rewarp_coord = warp_coord + warp_coord_flow2 + + # where the difference in position after flow1 and flow2 are applied is larger than a threshold there is likely an + # occlusion. set values to -1 so the final gaussian blur will spread the value a couple pixels around this area + squared_diff = np.sum((rewarp_coord - orig_coord) ** 2, axis=2) + threshold = 0.01 * np.sum(warp_coord_flow2 ** 2 + flow1 ** 2, axis=2) + 0.5 + + reliable_flow = np.ones((squared_diff.shape[0], squared_diff.shape[1], 3)) + reliable_flow[...,0] = np.where(squared_diff >= threshold, -0.75, 1) + + # areas mapping outside of the frame are also occluded (don't need extra region around these though, so set 0) + if edges_unreliable: + reliable_flow[...,1] = np.where( + np.logical_or.reduce( + ( + warp_coord[..., 0] < 0, + warp_coord[..., 1] < 0, + warp_coord[..., 0] >= h - 1, + warp_coord[..., 1] >= w - 1, + ) + ), + 0, + reliable_flow[...,1], + ) + + # get derivative of flow, large changes in derivative => edge of moving object + dx = np.diff(flow1, axis=1, append=0) + dy = np.diff(flow1, axis=0, append=0) + motion_edge = np.sum(dx ** 2 + dy ** 2, axis=2) + motion_threshold = 0.01 * np.sum(flow1 ** 2, axis=2) + 0.002 + reliable_flow[...,2] = np.where(np.logical_and(motion_edge > motion_threshold, reliable_flow[...,2] != -0.75), 0, reliable_flow[...,2]) + + return reliable_flow + + +# parser = argparse.ArgumentParser() +# parser.add_argument("--flow_fwd", type=str, required=True, help="Forward flow path or glob pattern") +# parser.add_argument("--flow_bwd", type=str, required=True, help="Backward flow path or glob pattern") +# parser.add_argument("--output", type=str, required=True, help="Output consistency map path") +# parser.add_argument("--output_postfix", type=str, default='_cc', help="Output consistency map name postfix") +# parser.add_argument("--image_output", action='store_true', help="Output consistency map as b\w image path") +# parser.add_argument("--skip_numpy_output", action='store_true', help="Don`t save numpy array") +# parser.add_argument("--blur", type=float, default=2., help="Gaussian blur kernel size 
(0 for no blur)") +# parser.add_argument("--bottom_clamp", type=float, default=0., help="Clamp lower values") +# parser.add_argument("--edges_reliable", action='store_true', help="Consider edges reliable") +# parser.add_argument("--save_separate_channels", action='store_true', help="Save consistency mask layers as separate channels") +# args = parser.parse_args() + +# def run(args): +# flow_fwd_many = sorted(glob.glob(args.flow_fwd)) +# flow_bwd_many = sorted(glob.glob(args.flow_bwd)) +# if len(flow_fwd_many)!= len(flow_bwd_many): +# raise Exception('Forward and backward flow file numbers don`t match') +# return + +# for flow_fwd,flow_bwd in tqdm(zip(flow_fwd_many, flow_bwd_many)): +# flow_fwd = flow_fwd.replace('\\','/') +# flow_bwd = flow_bwd.replace('\\','/') +# flow1 = np.load(flow_fwd) +# flow2 = np.load(flow_bwd) +# consistency_map_multilayer = make_consistency(flow1, flow2, edges_unreliable=not args.edges_reliable) + +# if args.save_separate_channels: +# consistency_map = consistency_map_multilayer +# else: +# consistency_map = np.ones_like(consistency_map_multilayer[...,0]) +# consistency_map*=consistency_map_multilayer[...,0] +# consistency_map*=consistency_map_multilayer[...,1] +# consistency_map*=consistency_map_multilayer[...,2] + +# # blur +# if args.blur>0.: +# consistency_map = scipy.ndimage.gaussian_filter(consistency_map, [args.blur, args.blur]) + +# #clip values between bottom_clamp and 1 +# bottom_clamp = min(max(args.bottom_clamp,0.), 0.999) +# consistency_map = consistency_map.clip(bottom_clamp, 1) +# out_fname = args.output+'/'+flow_fwd.split('/')[-1][:-4]+args.output_postfix + +# if not args.skip_numpy_output: +# np.save(out_fname, consistency_map) + +# #save as jpeg +# if args.image_output: +# PIL.Image.fromarray((consistency_map*255.).astype('uint8')).save(out_fname+'.jpg', quality=90) + +# run(args) diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/defaults.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/defaults.py new file mode 100644 index 0000000000000000000000000000000000000000..8b2dbd454d956fc4b8659c3aed6f58f09d95e02f --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/defaults.py @@ -0,0 +1,219 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
+ +# Contact the authors: https://deforum.github.io/ + +def get_samplers_list(): + return { + 'euler a': 'Euler a', + 'euler': 'Euler', + 'lms': 'LMS', + 'heun': 'Heun', + 'dpm2': 'DPM2', + 'dpm2 a': 'DPM2 a', + 'dpm++ 2s a': 'DPM++ 2S a', + 'dpm++ 2m': 'DPM++ 2M', + 'dpm++ sde': 'DPM++ SDE', + 'dpm fast': 'DPM fast', + 'dpm adaptive': 'DPM adaptive', + 'lms karras': 'LMS Karras', + 'dpm2 karras': 'DPM2 Karras', + 'dpm2 a karras': 'DPM2 a Karras', + 'dpm++ 2s a karras': 'DPM++ 2S a Karras', + 'dpm++ 2m karras': 'DPM++ 2M Karras', + 'dpm++ sde karras': 'DPM++ SDE Karras' + } + +def DeforumAnimPrompts(): + return r"""{ + "0": "tiny cute bunny, vibrant diffraction, highly detailed, intricate, ultra hd, sharp photo, crepuscular rays, in focus", + "30": "anthropomorphic clean cat, surrounded by fractals, epic angle and pose, symmetrical, 3d, depth of field", + "60": "a beautiful coconut --neg photo, realistic", + "90": "a beautiful durian, award winning photography" +} + """ + +# Guided images defaults +def get_guided_imgs_default_json(): + return '''{ + "0": "https://deforum.github.io/a1/Gi1.png", + "max_f/4-5": "https://deforum.github.io/a1/Gi2.png", + "max_f/2-10": "https://deforum.github.io/a1/Gi3.png", + "3*max_f/4-15": "https://deforum.github.io/a1/Gi4.jpg", + "max_f-20": "https://deforum.github.io/a1/Gi1.png" +}''' + +def get_hybrid_info_html(): + return """ +

+        Hybrid Video Compositing in 2D/3D Mode, by reallybigname
+          • Composite video with previous frame init image in 2D or 3D animation_mode (not for Video Input mode)
+          • Uses your Init settings for video_init_path, extract_nth_frame, overwrite_extracted_frames
+          • In Keyframes tab, you can also set color_coherence = 'Video Input'
+          • color_coherence_video_every_N_frames lets you only match every N frames
+          • Color coherence may be used with hybrid composite off, to just use video color.
+          • Hybrid motion may be used with hybrid composite off, to just use video motion.
+
+        Hybrid Video Schedules
+          • The alpha schedule controls overall alpha for video mix, whether using a composite mask or not.
+          • The hybrid_comp_mask_blend_alpha_schedule only affects the 'Blend' hybrid_comp_mask_type.
+          • Mask contrast schedule is from 0-255. Normal is 1. Affects all masks.
+          • Autocontrast low/high cutoff schedules 0-100. Low 0 High 100 is full range.
+            (hybrid_comp_mask_auto_contrast must be enabled)
+
+        Click Here for more info/a Guide.
+    """
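The schedules referenced in the bullets above are ordinary Deforum keyframe strings. As a minimal sketch of what a hybrid-compositing setup can look like, assuming setting keys named after the bullets (the exact key names are an assumption for illustration, not taken from this file):

# Sketch only: example "frame:(value)" schedule strings for the hybrid compositing
# options described above. Key names are assumed for illustration.
example_hybrid_schedules = {
    "hybrid_comp_alpha_schedule": "0:(0.5)",                         # overall alpha for the video mix
    "hybrid_comp_mask_blend_alpha_schedule": "0:(0.5)",              # only used with the 'Blend' mask type
    "hybrid_comp_mask_contrast_schedule": "0:(1)",                   # 0-255 range, 1 is normal
    "hybrid_comp_mask_auto_contrast_cutoff_low_schedule": "0:(0)",   # requires hybrid_comp_mask_auto_contrast
    "hybrid_comp_mask_auto_contrast_cutoff_high_schedule": "0:(100)",
}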
+
+def get_composable_masks_info_html():
+    return """
+          • To enable, check use_mask in the Init tab
+          • Supports boolean operations: (! - negation, & - and, | - or, ^ - xor, \ - difference, () - nested operations)
+          • default variables: in {}, like {init_mask}, {video_mask}, {everywhere}
+          • masks from files: in [], like [mask1.png]
+          • description-based: word masks in <>, like <apple>, <hair>
+    """
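A few example expressions in the mask syntax described above; the file name and the words are placeholders, only the operators and bracket types come from the bullets:

# Sketch only: composable mask expressions combining variables {}, file masks [] and word masks <>.
example_mask_expressions = [
    "{init_mask} & [mask1.png]",      # intersect the init mask with a mask loaded from a file
    "<apple> | <hair>",               # union of two description-based (word) masks
    r"{everywhere} \ {video_mask}",   # difference: everything except the video mask
    "!({init_mask} ^ [mask1.png])",   # negated xor, using nested parentheses
]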

+
+def get_parseq_info_html():
+    return """
+        Use a Parseq manifest for your animation (leave blank to ignore).
+
+        Fields managed in your Parseq manifest override the values and schedules set in other parts of this UI.
+        You can select which values to override by using the "Managed Fields" section in Parseq.
+    """
+
+def get_prompts_info_html():
+    return """
+          • Please always keep values in math functions above 0.
+          • There is *no* Batch mode like in vanilla deforum. Please use the txt2img tab for that.
+          • For negative prompts, write your positive prompt, then --neg ugly, text, asymmetric, or any other negative tokens of your choice. OR:
+          • Use the negative_prompts field to automatically append all words as a negative prompt. *Don't* add --neg in the negative_prompts field!
+          • Prompts are stored in JSON format. If you've got an error, check it in a JSON validator.
+    """
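For illustration, a prompts JSON entry using the inline --neg separator described above (the prompt text itself is a placeholder):

# Sketch only: everything after --neg is treated as the negative prompt for that keyframe.
example_prompts = {
    "0": "a beautiful forest, golden hour --neg ugly, text, asymmetric",
    "60": "a snowy mountain ridge, crisp light --neg blurry, watermark",
}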

+
+def get_guided_imgs_info_html():
+    return """
+        You can use this as a guided image tool or as a looper depending on your settings in the keyframe images field.
+        Set the keyframes and the images that you want to show up.
+        Note: the number of frames between each keyframe should be greater than the tweening frames.
+
+        Prerequisites and Important Info:
+          • This mode works ONLY with 2D/3D animation modes. Interpolation and Video Input modes aren't supported.
+          • Init tab's strength slider should be greater than 0. Recommended value (.65 - .80).
+          • 'seed_behavior' will be forcibly set to 'schedule'.
+
+        Looping recommendations:
+          • seed_schedule should start and end on the same seed.
+            Example: seed_schedule could use 0:(5), 1:(-1), 219:(-1), 220:(5)
+          • The 1st and last keyframe images should match.
+          • Set your total number of keyframes to be 21 more than the last inserted keyframe image.
+            Example: Default args should use 221 as the total keyframes.
+          • Prompts are stored in JSON format. If you've got an error, check it in a JSON validator.
+
+        The Guided images mode exposes the following variables for the prompts and the schedules:
+          • s is the initial seed for the whole video generation.
+          • max_f is the length of the video, in frames.
+            Example: seed_schedule could use 0:(s), 1:(-1), "max_f-2":(-1), "max_f-1":(s)
+          • t is the current frame number.
+            Example: strength_schedule could use 0:(0.25 * cos((72 / 60 * 3.141 * (t + 0) / 30))**13 + 0.7) to make alternating changes each 30 frames
+    """
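Putting the looping recommendations above together, a guided-images run could use schedules like these; the seed and strength strings are the ones quoted in the text, the surrounding keys are a sketch:

# Sketch only: schedules for a looping guided-images run, per the recommendations above.
example_guided_schedules = {
    "seed_behavior": "schedule",                          # forced to 'schedule' by this mode
    "seed_schedule": "0:(5), 1:(-1), 219:(-1), 220:(5)",  # starts and ends on the same seed
    "strength_schedule": "0:(0.25 * cos((72 / 60 * 3.141 * (t + 0) / 30))**13 + 0.7)",  # alternates every 30 frames
    "max_frames": 221,                                    # 21 more than the last inserted keyframe image
}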

+
+def get_main_info_html():
+    return """
+        Made by deforum.github.io, port for AUTOMATIC1111's webui maintained by Deforum LLC.
+
+        FOR HELP CLICK HERE
+          • The code for this extension: here.
+          • Join the official Deforum Discord to share your creations and suggestions.
+          • Official Deforum Wiki: here.
+          • Anime-inclined great guide (by FizzleDorf) with lots of examples: here.
+          • For advanced keyframing with Math functions, see here.
+          • Alternatively, use sd-parseq as a UI to define your animation schedules (see the Parseq section in the Init tab).
+          • framesync.xyz is also a good option; it makes compact math formulae for Deforum keyframes by selecting various waveforms.
+          • The other site allows for making keyframes using interactive splines and Bezier curves (select Disco output format).
+          • If you want to use Width/Height which are not multiples of 64, please change noise_type to 'Uniform' in Keyframes --> Noise.
+
+        If you liked this extension, please give it a star on GitHub! 😊
+    """
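The math-function keyframing linked above uses the same "frame:(expression)" format that appears throughout this file; expressions may reference t, max_f and s as described in the Guided images section. A small sketch with assumed parameter names:

# Sketch only: "frame:(value)" keyframe strings, with a math expression on the last one.
example_keyframe_schedules = {
    "zoom": "0:(1.0), 60:(1.02)",
    "strength_schedule": "0:(0.65)",
    "noise_schedule": "0:(0.02 + 0.01*sin(2*3.141*t/30))",
}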

+
+def get_frame_interpolation_info_html():
+    return """
+        Use RIFE / FILM Frame Interpolation to smooth out, slow-mo (or both) any video.
+
+        Supported engines:
+          • RIFE v4.6 and FILM.
+
+        Important notes:
+          • Frame Interpolation will *not* run if any of the following are enabled: 'Store frames in ram' / 'Skip video for run all'.
+          • Audio (if provided) will *not* be transferred to the interpolated video if Slow-Mo is enabled.
+          • 'add_soundtrack' and 'soundtrack_path' aren't honoured in "Interpolate an existing video" mode. The original vid audio will be used instead, with the same slow-mo rules above.
+          • In "Interpolate existing pics" mode, FPS is determined *only* by the output FPS slider. Audio will be added if requested even with slow-mo "enabled", as it does *nothing* in this mode.
+    """

+
+def get_frames_to_video_info_html():
+    return """
+        Important Notes:
+          • Enter a path relative to the webui folder or a full absolute path, and make sure it ends with something like this: '20230124234916_%09d.png'; just replace 20230124234916 with your batch ID. The %09d is important, don't forget it!
+          • In the filename, '%09d' represents the 9 counting digits. For '20230124234916_000000001.png', use '20230124234916_%09d.png'.
+          • If the frames are not from Deforum, use the correct number of counting digits. For files like 'bunnies-0000.jpg', you'd use 'bunnies-%04d.jpg'.
+ """ +def get_leres_info_html(): + return 'Note that LeReS has a Non-Commercial license. Use it only for fun/personal use.' + +def get_gradio_html(section_name): + if section_name.lower() == 'hybrid_video': + return get_hybrid_info_html() + elif section_name.lower() == 'composable_masks': + return get_composable_masks_info_html() + elif section_name.lower() == 'parseq': + return get_parseq_info_html() + elif section_name.lower() == 'prompts': + return get_prompts_info_html() + elif section_name.lower() == 'guided_imgs': + return get_guided_imgs_info_html() + elif section_name.lower() == 'main': + return get_main_info_html() + elif section_name.lower() == 'frame_interpolation': + return get_frame_interpolation_info_html() + elif section_name.lower() == 'frames_to_video': + return get_frames_to_video_info_html() + elif section_name.lower() == 'leres': + return get_leres_info_html() + else: + return "" + +mask_fill_choices = ['fill', 'original', 'latent noise', 'latent nothing'] + diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/deforum_controlnet.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/deforum_controlnet.py new file mode 100644 index 0000000000000000000000000000000000000000..cef20cbc8517f5b9f3312a1b3ea9832dc72ca1ce --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/deforum_controlnet.py @@ -0,0 +1,368 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +# Contact the authors: https://deforum.github.io/ + +# This helper script is responsible for ControlNet/Deforum integration +# https://github.com/Mikubill/sd-webui-controlnet — controlnet repo + +import os +import copy +import gradio as gr +import scripts +from PIL import Image +import numpy as np +import importlib +from modules import scripts, shared +from .deforum_controlnet_gradio import hide_ui_by_cn_status, hide_file_textboxes, ToolButton +from .general_utils import count_files_in_folder, clean_gradio_path_strings # TODO: do it another way +from .video_audio_utilities import vid2frames, convert_image +from .animation_key_frames import ControlNetKeys +from .load_images import load_image +from .general_utils import debug_print + +cnet = None +# number of CN model tabs to show in the deforum gui. If the user has set it in the A1111 UI to a value less than 5 +# then we set it to 5. 
Else, we respect the value they specified +max_models = shared.opts.data.get("control_net_unit_count", shared.opts.data.get("control_net_max_models_num", 5)) +num_of_models = 5 if max_models <= 5 else max_models + +def find_controlnet(): + global cnet + if cnet: return cnet + try: + cnet = importlib.import_module('extensions.sd-webui-controlnet.scripts.external_code', 'external_code') + except: + try: + cnet = importlib.import_module('extensions-builtin.sd-webui-controlnet.scripts.external_code', 'external_code') + except: + pass + if cnet: + print(f"\033[0;32m*Deforum ControlNet support: enabled*\033[0m") + return True + return None + +def controlnet_infotext(): + return """Requires the ControlNet extension to be installed.

+        If Deforum crashes due to CN updates, go here and report your problem.
+ """ + +def is_controlnet_enabled(controlnet_args): + for i in range(1, num_of_models + 1): + if getattr(controlnet_args, f'cn_{i}_enabled', False): + return True + return False + +def setup_controlnet_ui_raw(): + cnet = find_controlnet() + cn_models = cnet.get_models() + cn_preprocessors = cnet.get_modules() + + cn_modules = cnet.get_modules_detail() + preprocessor_sliders_config = {} + + for config_name, config_values in cn_modules.items(): + sliders = config_values.get('sliders', []) + preprocessor_sliders_config[config_name] = sliders + + model_free_preprocessors = ["reference_only", "reference_adain", "reference_adain+attn"] + flag_preprocessor_resolution = "Preprocessor Resolution" + + def build_sliders(module, pp): + grs = [] + if module not in preprocessor_sliders_config: + grs += [ + gr.update(label=flag_preprocessor_resolution, value=512, minimum=64, maximum=2048, step=1, visible=not pp, interactive=not pp), + gr.update(visible=False, interactive=False), + gr.update(visible=False, interactive=False), + gr.update(visible=True) + ] + else: + for slider_config in preprocessor_sliders_config[module]: + if isinstance(slider_config, dict): + visible = True + if slider_config['name'] == flag_preprocessor_resolution: + visible = not pp + grs.append(gr.update( + label=slider_config['name'], + value=slider_config['value'], + minimum=slider_config['min'], + maximum=slider_config['max'], + step=slider_config['step'] if 'step' in slider_config else 1, + visible=visible, + interactive=visible)) + else: + grs.append(gr.update(visible=False, interactive=False)) + while len(grs) < 3: + grs.append(gr.update(visible=False, interactive=False)) + grs.append(gr.update(visible=True)) + if module in model_free_preprocessors: + grs += [gr.update(visible=False, value='None'), gr.update(visible=False)] + else: + grs += [gr.update(visible=True), gr.update(visible=True)] + return grs + + refresh_symbol = '\U0001f504' # 🔄 + switch_values_symbol = '\U000021C5' # ⇅ + model_dropdowns = [] + infotext_fields = [] + + def create_model_in_tab_ui(cn_id): + with gr.Row(): + enabled = gr.Checkbox(label="Enable", value=False, interactive=True) + pixel_perfect = gr.Checkbox(label="Pixel Perfect", value=False, visible=False, interactive=True) + low_vram = gr.Checkbox(label="Low VRAM", value=False, visible=False, interactive=True) + overwrite_frames = gr.Checkbox(label='Overwrite input frames', value=True, visible=False, interactive=True) + with gr.Row(visible=False) as mod_row: + module = gr.Dropdown(cn_preprocessors, label=f"Preprocessor", value="none", interactive=True) + model = gr.Dropdown(cn_models, label=f"Model", value="None", interactive=True) + refresh_models = ToolButton(value=refresh_symbol) + refresh_models.click(refresh_all_models, model, model) + with gr.Row(visible=False) as weight_row: + weight = gr.Textbox(label="Weight schedule", lines=1, value='0:(1)', interactive=True) + with gr.Row(visible=False) as start_cs_row: + guidance_start = gr.Textbox(label="Starting Control Step schedule", lines=1, value='0:(0.0)', interactive=True) + with gr.Row(visible=False) as end_cs_row: + guidance_end = gr.Textbox(label="Ending Control Step schedule", lines=1, value='0:(1.0)', interactive=True) + model_dropdowns.append(model) + with gr.Column(visible=False) as advanced_column: + processor_res = gr.Slider(label="Annotator resolution", value=64, minimum=64, maximum=2048, interactive=False) + threshold_a = gr.Slider(label="Threshold A", value=64, minimum=64, maximum=1024, interactive=False) + threshold_b = 
gr.Slider(label="Threshold B", value=64, minimum=64, maximum=1024, interactive=False) + with gr.Row(visible=False) as vid_path_row: + vid_path = gr.Textbox(value='', label="ControlNet Input Video/ Image Path", interactive=True) + with gr.Row(visible=False) as mask_vid_path_row: # invisible temporarily since 26-04-23 until masks are fixed + mask_vid_path = gr.Textbox(value='', label="ControlNet Mask Video/ Image Path (*NOT WORKING, kept in UI for CN's devs testing!*)", interactive=True) + with gr.Row(visible=False) as control_mode_row: + control_mode = gr.Radio(choices=["Balanced", "My prompt is more important", "ControlNet is more important"], value="Balanced", label="Control Mode", interactive=True) + with gr.Row(visible=False) as env_row: + resize_mode = gr.Radio(choices=["Outer Fit (Shrink to Fit)", "Inner Fit (Scale to Fit)", "Just Resize"], value="Inner Fit (Scale to Fit)", label="Resize Mode", interactive=True) + with gr.Row(visible=False) as control_loopback_row: + loopback_mode = gr.Checkbox(label="LoopBack mode", value=False, interactive=True) + hide_output_list = [pixel_perfect, low_vram, mod_row, module, weight_row, start_cs_row, end_cs_row, env_row, overwrite_frames, vid_path_row, control_mode_row, mask_vid_path_row, + control_loopback_row] # add mask_vid_path_row when masks are working again + for cn_output in hide_output_list: + enabled.change(fn=hide_ui_by_cn_status, inputs=enabled, outputs=cn_output) + module.change(build_sliders, inputs=[module, pixel_perfect], outputs=[processor_res, threshold_a, threshold_b, advanced_column, model, refresh_models]) + # hide vid/image input fields + loopback_outs = [vid_path_row, mask_vid_path_row] + for loopback_output in loopback_outs: + loopback_mode.change(fn=hide_file_textboxes, inputs=loopback_mode, outputs=loopback_output) + # handle pixel perfect ui changes + pixel_perfect.change(build_sliders, inputs=[module, pixel_perfect], outputs=[processor_res, threshold_a, threshold_b, advanced_column, model, refresh_models]) + infotext_fields.extend([ + (module, f"ControlNet Preprocessor"), + (model, f"ControlNet Model"), + (weight, f"ControlNet Weight"), + ]) + + return {key: value for key, value in locals().items() if key in [ + "enabled", "pixel_perfect", "low_vram", "module", "model", "weight", + "guidance_start", "guidance_end", "processor_res", "threshold_a", "threshold_b", "resize_mode", "control_mode", + "overwrite_frames", "vid_path", "mask_vid_path", "loopback_mode" + ]} + + def refresh_all_models(*inputs): + cn_models = cnet.get_models(update=True) + dd = inputs[0] + selected = dd if dd in cn_models else "None" + return gr.Dropdown.update(value=selected, choices=cn_models) + + with gr.TabItem('ControlNet'): + gr.HTML(controlnet_infotext()) + with gr.Tabs(): + model_params = {} + for i in range(1, num_of_models + 1): + with gr.Tab(f"CN Model {i}"): + model_params[i] = create_model_in_tab_ui(i) + + for key, value in model_params[i].items(): + locals()[f"cn_{i}_{key}"] = value + + return locals() + +def setup_controlnet_ui(): + if not find_controlnet(): + gr.HTML("""ControlNet not found. Please install it :)""", elem_id='controlnet_not_found_html_msg') + return {} + + try: + return setup_controlnet_ui_raw() + except Exception as e: + print(f"'ControlNet UI setup failed with error: '{e}'!") + gr.HTML(f""" + Failed to setup ControlNet UI, check the reason in your commandline log. Please, downgrade your CN extension to c9340671d6d59e5a79fc404f78f747f969f87374 or report the problem here. 
+ """, elem_id='controlnet_not_found_html_msg') + return {} + +def controlnet_component_names(): + if not find_controlnet(): + return [] + + return [f'cn_{i}_{component}' for i in range(1, num_of_models + 1) for component in [ + 'overwrite_frames', 'vid_path', 'mask_vid_path', 'enabled', + 'low_vram', 'pixel_perfect', + 'module', 'model', 'weight', 'guidance_start', 'guidance_end', + 'processor_res', 'threshold_a', 'threshold_b', 'resize_mode', 'control_mode', 'loopback_mode' + ]] + +def process_with_controlnet(p, args, anim_args, controlnet_args, root, parseq_adapter, is_img2img=True, frame_idx=0): + CnSchKeys = ControlNetKeys(anim_args, controlnet_args) if not parseq_adapter.use_parseq else parseq_adapter.cn_keys + + def read_cn_data(cn_idx): + cn_mask_np, cn_image_np = None, None + # Loopback mode ENABLED: + if getattr(controlnet_args, f'cn_{cn_idx}_loopback_mode'): + # On very first frame, check if use init enabled, and if init image is provided + if frame_idx == 0 and args.use_init and (args.init_image is not None or args.init_image_box is not None): + cn_image_np = load_image(args.init_image, args.init_image_box) + # convert to uint8 for compatibility with CN + cn_image_np = np.array(cn_image_np).astype('uint8') + # Not first frame, use previous img (init_sample) + elif frame_idx > 0 and root.init_sample: + cn_image_np = np.array(root.init_sample).astype('uint8') + else: # loopback mode is DISABLED + cn_inputframes = os.path.join(args.outdir, f'controlnet_{cn_idx}_inputframes') # set input frames folder path + if os.path.exists(cn_inputframes): + if count_files_in_folder(cn_inputframes) == 1: + cn_frame_path = os.path.join(cn_inputframes, "000000000.jpg") + print(f'Reading ControlNet *static* base frame at {cn_frame_path}') + else: + cn_frame_path = os.path.join(cn_inputframes, f"{frame_idx:09}.jpg") + print(f'Reading ControlNet {cn_idx} base frame #{frame_idx} at {cn_frame_path}') + if os.path.exists(cn_frame_path): + cn_image_np = np.array(Image.open(cn_frame_path).convert("RGB")).astype('uint8') + cn_maskframes = os.path.join(args.outdir, f'controlnet_{cn_idx}_maskframes') # set mask frames folder path + if os.path.exists(cn_maskframes): + if count_files_in_folder(cn_maskframes) == 1: + cn_mask_frame_path = os.path.join(cn_inputframes, "000000000.jpg") + print(f'Reading ControlNet *static* mask frame at {cn_mask_frame_path}') + else: + cn_mask_frame_path = os.path.join(args.outdir, f'controlnet_{cn_idx}_maskframes', f"{frame_idx:09}.jpg") + print(f'Reading ControlNet {cn_idx} mask frame #{frame_idx} at {cn_mask_frame_path}') + if os.path.exists(cn_mask_frame_path): + cn_mask_np = np.array(Image.open(cn_mask_frame_path).convert("RGB")).astype('uint8') + + return cn_mask_np, cn_image_np + + cnet = find_controlnet() + cn_data = [read_cn_data(i) for i in range(1, num_of_models + 1)] + + # Check if any loopback_mode is set to True + any_loopback_mode = any(getattr(controlnet_args, f'cn_{i}_loopback_mode') for i in range(1, num_of_models + 1)) + + cn_inputframes_list = [os.path.join(args.outdir, f'controlnet_{i}_inputframes') for i in range(1, num_of_models + 1)] + + if not any(os.path.exists(cn_inputframes) for cn_inputframes in cn_inputframes_list) and not any_loopback_mode: + print(f'\033[33mNeither the base nor the masking frames for ControlNet were found. Using the regular pipeline\033[0m') + + # Remove all scripts except controlnet. 
+ # + # This is required because controlnet's access to p.script_args invokes @script_args.setter, + # which triggers *all* alwayson_scripts' setup() functions, with whatever happens to be in script_args. + # In the case of seed.py (which we really don't need with deforum), this ovewrites our p.seed & co, which we + # had carefully prepared previously. So let's remove the scripts to avoid the problem. + # + # An alternative would be to populate all the args with the correct values + # for all scripts, but this seems even more fragile, as it would break + # if a1111 adds or removed scripts. + # + # Note that we must copy scripts.scripts_img2img or scripts.scripts_txt2img before mutating it + # because it persists across requests. Shallow-copying is sufficient because we only mutate a top-level + # reference (scripts.alwayson_scripts) + # + p.scripts = copy.copy(scripts.scripts_img2img if is_img2img else scripts.scripts_txt2img) + controlnet_script = find_controlnet_script(p) + p.scripts.alwayson_scripts = [controlnet_script] + # Filling the list with None is safe because only the length will be considered, + # and all cn args will be replaced. + p.script_args_value = [None] * controlnet_script.args_to + + def create_cnu_dict(cn_args, prefix, img_np, mask_np, frame_idx, CnSchKeys): + + keys = [ + "enabled", "module", "model", "weight", "resize_mode", "control_mode", "low_vram", "pixel_perfect", + "processor_res", "threshold_a", "threshold_b", "guidance_start", "guidance_end" + ] + cnu = {k: getattr(cn_args, f"{prefix}_{k}") for k in keys} + model_num = int(prefix.split('_')[-1]) # Extract model number from prefix (e.g., "cn_1" -> 1) + if 1 <= model_num <= num_of_models: + # if in loopmode and no init image (img_np, after processing in this case) provided, disable CN unit for the very first frame. 
Will be enabled in the next frame automatically + if getattr(cn_args, f"cn_{model_num}_loopback_mode") and frame_idx == 0 and img_np is None: + cnu['enabled'] = False + cnu['weight'] = getattr(CnSchKeys, f"cn_{model_num}_weight_schedule_series")[frame_idx] + cnu['guidance_start'] = getattr(CnSchKeys, f"cn_{model_num}_guidance_start_schedule_series")[frame_idx] + cnu['guidance_end'] = getattr(CnSchKeys, f"cn_{model_num}_guidance_end_schedule_series")[frame_idx] + if cnu['enabled']: + debug_print(f"ControlNet {model_num}: weight={cnu['weight']}, guidance_start={cnu['guidance_start']}, guidance_end={cnu['guidance_end']}") + cnu['image'] = {'image': img_np, 'mask': mask_np} if mask_np is not None else img_np + + return cnu + + masks_np, images_np = zip(*cn_data) + + cn_units = [cnet.ControlNetUnit(**create_cnu_dict(controlnet_args, f"cn_{i + 1}", img_np, mask_np, frame_idx, CnSchKeys)) + for i, (img_np, mask_np) in enumerate(zip(images_np, masks_np))] + + cnet.update_cn_script_in_processing(p, cn_units, is_img2img=is_img2img, is_ui=False) + +def find_controlnet_script(p): + controlnet_script = next((script for script in p.scripts.alwayson_scripts if script.title().lower() == "controlnet"), None) + if not controlnet_script: + raise Exception("ControlNet script not found.") + return controlnet_script + +def process_controlnet_input_frames(args, anim_args, controlnet_args, video_path, mask_path, outdir_suffix, id): + if (video_path or mask_path) and getattr(controlnet_args, f'cn_{id}_enabled'): + frame_path = os.path.join(args.outdir, f'controlnet_{id}_{outdir_suffix}') + os.makedirs(frame_path, exist_ok=True) + + accepted_image_extensions = ('.jpg', '.jpeg', '.png', '.bmp') + if video_path and video_path.lower().endswith(accepted_image_extensions): + convert_image(video_path, os.path.join(frame_path, '000000000.jpg')) + print(f"Copied CN Model {id}'s single input image to inputframes folder!") + elif mask_path and mask_path.lower().endswith(accepted_image_extensions): + convert_image(mask_path, os.path.join(frame_path, '000000000.jpg')) + print(f"Copied CN Model {id}'s single input image to inputframes *mask* folder!") + else: + print(f'Unpacking ControlNet {id} {"video mask" if mask_path else "base video"}') + print(f"Exporting Video Frames to {frame_path}...") + vid2frames( + video_path=video_path or mask_path, + video_in_frame_path=frame_path, + n=1 if anim_args.animation_mode != 'Video Input' else anim_args.extract_nth_frame, + overwrite=getattr(controlnet_args, f'cn_{id}_overwrite_frames'), + extract_from_frame=0 if anim_args.animation_mode != 'Video Input' else anim_args.extract_from_frame, + extract_to_frame=(anim_args.max_frames - 1) if anim_args.animation_mode != 'Video Input' else anim_args.extract_to_frame, + numeric_files_output=True + ) + print(f"Loading {anim_args.max_frames} input frames from {frame_path} and saving video frames to {args.outdir}") + print(f'ControlNet {id} {"video mask" if mask_path else "base video"} unpacked!') + +def unpack_controlnet_vids(args, anim_args, controlnet_args): + # this func gets called from render.py once for an entire animation run --> + # tries to trigger an extraction of CN input frames (regular + masks) from video or image + for i in range(1, num_of_models + 1): + # LoopBack mode is enabled, no need to extract a video or copy an init image + if getattr(controlnet_args, f'cn_{i}_loopback_mode'): + print(f"ControlNet #{i} is in LoopBack mode, skipping video/ image extraction stage.") + continue + vid_path = 
clean_gradio_path_strings(getattr(controlnet_args, f'cn_{i}_vid_path', None)) + mask_path = clean_gradio_path_strings(getattr(controlnet_args, f'cn_{i}_mask_vid_path', None)) + + if vid_path: # Process base video, if available + process_controlnet_input_frames(args, anim_args, controlnet_args, vid_path, None, 'inputframes', i) + + if mask_path: # Process mask video, if available + process_controlnet_input_frames(args, anim_args, controlnet_args, None, mask_path, 'maskframes', i) diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/deforum_controlnet_gradio.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/deforum_controlnet_gradio.py new file mode 100644 index 0000000000000000000000000000000000000000..1516e0fa0ff07a8a90c6ddb5bd5ec374142bb696 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/deforum_controlnet_gradio.py @@ -0,0 +1,50 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +# Contact the authors: https://deforum.github.io/ + +import gradio as gr +# print (cnet_1.get_modules()) + + # *** TODO: re-enable table printing! disabled only temp! 13-04-23 *** + # table = Table(title="ControlNet params",padding=0, box=box.ROUNDED) + + # TODO: auto infer the names and the values for the table + # field_names = [] + # field_names += ["module", "model", "weight", "inv", "guide_start", "guide_end", "guess", "resize", "rgb_bgr", "proc res", "thr a", "thr b"] + # for field_name in field_names: + # table.add_column(field_name, justify="center") + + # cn_model_name = str(controlnet_args.cn_1_model) + + # rows = [] + # rows += [controlnet_args.cn_1_module, cn_model_name[len('control_'):] if 'control_' in cn_model_name else cn_model_name, controlnet_args.cn_1_weight, controlnet_args.cn_1_invert_image, controlnet_args.cn_1_guidance_start, controlnet_args.cn_1_guidance_end, controlnet_args.cn_1_guess_mode, controlnet_args.cn_1_resize_mode, controlnet_args.cn_1_rgbbgr_mode, controlnet_args.cn_1_processor_res, controlnet_args.cn_1_threshold_a, controlnet_args.cn_1_threshold_b] + # rows = [str(x) for x in rows] + + # table.add_row(*rows) + # console.print(table) + +def hide_ui_by_cn_status(choice): + return gr.update(visible=True) if choice else gr.update(visible=False) + +def hide_file_textboxes(choice): + return gr.update(visible=False) if choice else gr.update(visible=True) + +class ToolButton(gr.Button, gr.components.FormComponent): + """Small button with single emoji as text, fits inside gradio forms""" + def __init__(self, **kwargs): + super().__init__(variant="tool", **kwargs) + + def get_block_name(self): + return "button" \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/deforum_tqdm.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/deforum_tqdm.py new file mode 100644 index 0000000000000000000000000000000000000000..9670743d9037bb605cd22dcbbae56db50a525a01 --- /dev/null +++ 
b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/deforum_tqdm.py @@ -0,0 +1,98 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +# Contact the authors: https://deforum.github.io/ + +import os +from math import ceil +import tqdm +from modules.shared import progress_print_out, opts, cmd_opts + +class DeforumTQDM: + def __init__(self, args, anim_args, parseq_args, video_args): + self._tqdm = None + self._args = args + self._anim_args = anim_args + self._parseq_args = parseq_args + self._video_args = video_args + + def reset(self): + from .animation_key_frames import DeformAnimKeys + from .parseq_adapter import ParseqAdapter + deforum_total = 0 + # FIXME: get only amount of steps + parseq_adapter = ParseqAdapter(self._parseq_args, self._anim_args, self._video_args, None, None, mute=True) + keys = DeformAnimKeys(self._anim_args) if not parseq_adapter.use_parseq else parseq_adapter.anim_keys + + start_frame = 0 + if self._anim_args.resume_from_timestring: + for tmp in os.listdir(self._args.outdir): + filename = tmp.split("_") + # don't use saved depth maps to count number of frames + if self._anim_args.resume_timestring in filename and "depth" not in filename: + start_frame += 1 + start_frame = start_frame - 1 + using_vid_init = self._anim_args.animation_mode == 'Video Input' + turbo_steps = 1 if using_vid_init else int(self._anim_args.diffusion_cadence) + if self._anim_args.resume_from_timestring: + last_frame = start_frame - 1 + if turbo_steps > 1: + last_frame -= last_frame % turbo_steps + if turbo_steps > 1: + turbo_next_frame_idx = last_frame + turbo_prev_frame_idx = turbo_next_frame_idx + start_frame = last_frame + turbo_steps + frame_idx = start_frame + had_first = False + while frame_idx < self._anim_args.max_frames: + strength = keys.strength_schedule_series[frame_idx] + if not had_first and self._args.use_init and ((self._args.init_image is not None and self._args.init_image != '') or self._args.init_image_box is not None): + deforum_total += int(ceil(self._args.steps * (1 - strength))) + had_first = True + elif not had_first: + deforum_total += self._args.steps + had_first = True + else: + deforum_total += int(ceil(self._args.steps * (1 - strength))) + + if turbo_steps > 1: + frame_idx += turbo_steps + else: + frame_idx += 1 + + self._tqdm = tqdm.tqdm( + desc="Deforum progress", + total=deforum_total, + position=1, + file=progress_print_out + ) + + def update(self): + if not opts.multiple_tqdm or cmd_opts.disable_console_progressbars: + return + if self._tqdm is None: + self.reset() + self._tqdm.update() + + def updateTotal(self, new_total): + if not opts.multiple_tqdm or cmd_opts.disable_console_progressbars: + return + if self._tqdm is None: + self.reset() + self._tqdm.total = new_total + + def clear(self): + if self._tqdm is not None: + self._tqdm.close() + self._tqdm = None diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/deprecation_utils.py 
b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/deprecation_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..fd4d62a77e041f1c12279beef367883a193a43ff --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/deprecation_utils.py @@ -0,0 +1,98 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +# Contact the authors: https://deforum.github.io/ + +# This file is used to map deprecated setting names in a dictionary +# and print a message containing the old and the new names + +deprecation_map = { + "histogram_matching": None, + "flip_2d_perspective": "enable_perspective_flip", + "skip_video_for_run_all": "skip_video_creation", + "color_coherence": [ + ("Match Frame 0 HSV", "HSV", False), + ("Match Frame 0 LAB", "LAB", False), + ("Match Frame 0 RGB", "RGB", False), + # ,("removed_value", None, True) # for removed values, if we'll need in the future + ], + "hybrid_composite": [ + (False, "None", False), + (True, "Normal", False), + ], + "optical_flow_redo_generation": [ + (False, "None", False), + (True, "DIS Fine", False), + ], + "optical_flow_cadence": [ + (False, "None", False), + (True, "DIS Fine", False), + ], + "cn_1_resize_mode": [ + ("Envelope (Outer Fit)", "Outer Fit (Shrink to Fit)", False), + ("Scale to Fit (Inner Fit)", "Inner Fit (Scale to Fit)", False), + ], + "cn_2_resize_mode": [ + ("Envelope (Outer Fit)", "Outer Fit (Shrink to Fit)", False), + ("Scale to Fit (Inner Fit)", "Inner Fit (Scale to Fit)", False), + ], + "cn_3_resize_mode": [ + ("Envelope (Outer Fit)", "Outer Fit (Shrink to Fit)", False), + ("Scale to Fit (Inner Fit)", "Inner Fit (Scale to Fit)", False), + ], + "use_zoe_depth": ("depth_algorithm", [("True", "Zoe+AdaBins (old)"), ("False", "Midas+AdaBins (old)")]), +} + +def dynamic_num_to_schedule_formatter(old_value): + return f"0:({old_value})" + +for i in range(1, 6): # 5 CN models in total + deprecation_map[f"cn_{i}_weight"] = dynamic_num_to_schedule_formatter + deprecation_map[f"cn_{i}_guidance_start"] = dynamic_num_to_schedule_formatter + deprecation_map[f"cn_{i}_guidance_end"] = dynamic_num_to_schedule_formatter + +def handle_deprecated_settings(settings_json): + # Set legacy_colormatch mode to True when importing old files, so results are backwards-compatible. Print a message about it too + if 'legacy_colormatch' not in settings_json: + settings_json['legacy_colormatch'] = True + print('\033[33mlegacy_colormatch is missing from settings file, so we are setting it to *True* for backwards compatability. You are welcome to test your file with that setting being disabled for better color coherency.\033[0m') + print("") + for setting_name, deprecation_info in deprecation_map.items(): + if setting_name in settings_json: + if deprecation_info is None: + print(f"WARNING: Setting '{setting_name}' has been removed. 
It will be discarded and the default value used instead!") + elif isinstance(deprecation_info, tuple): + new_setting_name, value_map = deprecation_info + old_value = str(settings_json.pop(setting_name)) # Convert the boolean value to a string for comparison + new_value = next((v for k, v in value_map if k == old_value), None) + if new_value is not None: + print(f"WARNING: Setting '{setting_name}' has been renamed to '{new_setting_name}' with value '{new_value}'. The saved settings file will reflect the change") + settings_json[new_setting_name] = new_value + elif callable(deprecation_info): + old_value = settings_json[setting_name] + if isinstance(old_value, (int, float)): + new_value = deprecation_info(old_value) + print(f"WARNING: Value '{old_value}' for setting '{setting_name}' has been replaced with '{new_value}'. The saved settings file will reflect the change") + settings_json[setting_name] = new_value + elif isinstance(deprecation_info, str): + print(f"WARNING: Setting '{setting_name}' has been renamed to '{deprecation_info}'. The saved settings file will reflect the change") + settings_json[deprecation_info] = settings_json.pop(setting_name) + elif isinstance(deprecation_info, list): + for old_value, new_value, is_removed in deprecation_info: + if settings_json[setting_name] == old_value: + if is_removed: + print(f"WARNING: Value '{old_value}' for setting '{setting_name}' has been removed. It will be discarded and the default value used instead!") + else: + print(f"WARNING: Value '{old_value}' for setting '{setting_name}' has been replaced with '{new_value}'. The saved settings file will reflect the change") + settings_json[setting_name] = new_value \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/depth.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/depth.py new file mode 100644 index 0000000000000000000000000000000000000000..ecff47ce7c0bc3586005dcd56692e51133a1ec6e --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/depth.py @@ -0,0 +1,159 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
+ +# Contact the authors: https://deforum.github.io/ + +import gc +import cv2 +import numpy as np +import torch +from PIL import Image +from einops import rearrange, repeat +from modules import devices +from modules.shared import cmd_opts +from .depth_adabins import AdaBinsModel +from .depth_leres import LeReSDepth +from .depth_midas import MidasDepth +from .depth_zoe import ZoeDepth +from .general_utils import debug_print + +class DepthModel: + _instance = None + + def __new__(cls, *args, **kwargs): + keep_in_vram = kwargs.get('keep_in_vram', False) + depth_algorithm = kwargs.get('depth_algorithm', 'Midas-3-Hybrid') + Width, Height = kwargs.get('Width', 512), kwargs.get('Height', 512) + midas_weight = kwargs.get('midas_weight', 0.2) + model_switched = cls._instance and cls._instance.depth_algorithm != depth_algorithm + resolution_changed = cls._instance and (cls._instance.Width != Width or cls._instance.Height != Height) + zoe_algorithm = 'zoe' in depth_algorithm.lower() + model_deleted = cls._instance and cls._instance.should_delete + + should_reload = (cls._instance is None or model_deleted or model_switched or (zoe_algorithm and resolution_changed)) + + if should_reload: + cls._instance = super().__new__(cls) + cls._instance._initialize(models_path=args[0], device=args[1], half_precision=not cmd_opts.no_half, keep_in_vram=keep_in_vram, depth_algorithm=depth_algorithm, Width=Width, Height=Height, midas_weight=midas_weight) + elif cls._instance.should_delete and keep_in_vram: + cls._instance._initialize(models_path=args[0], device=args[1], half_precision=not cmd_opts.no_half, keep_in_vram=keep_in_vram, depth_algorithm=depth_algorithm, Width=Width, Height=Height, midas_weight=midas_weight) + cls._instance.should_delete = not keep_in_vram + return cls._instance + + def _initialize(self, models_path, device, half_precision=not cmd_opts.no_half, keep_in_vram=False, depth_algorithm='Midas-3-Hybrid', Width=512, Height=512, midas_weight=1.0): + self.models_path = models_path + self.device = device + self.half_precision = half_precision + self.keep_in_vram = keep_in_vram + self.depth_algorithm = depth_algorithm + self.Width, self.Height = Width, Height + self.midas_weight = midas_weight + self.depth_min, self.depth_max = 1000, -1000 + self.adabins_helper = None + self._initialize_model() + + def _initialize_model(self): + depth_algo = self.depth_algorithm.lower() + if depth_algo.startswith('zoe'): + self.zoe_depth = ZoeDepth(self.Width, self.Height) + if depth_algo == 'zoe+adabins (old)': + self.adabins_model = AdaBinsModel(self.models_path, keep_in_vram=self.keep_in_vram) + self.adabins_helper = self.adabins_model.adabins_helper + elif depth_algo == 'leres': + self.leres_depth = LeReSDepth(width=448, height=448, models_path=self.models_path, checkpoint_name='res101.pth', backbone='resnext101') + elif depth_algo == 'adabins': + self.adabins_model = AdaBinsModel(self.models_path, keep_in_vram=self.keep_in_vram) + self.adabins_helper = self.adabins_model.adabins_helper + elif depth_algo.startswith('midas'): + self.midas_depth = MidasDepth(self.models_path, self.device, half_precision=self.half_precision, midas_model_type=self.depth_algorithm) + if depth_algo == 'midas+adabins (old)': + self.adabins_model = AdaBinsModel(self.models_path, keep_in_vram=self.keep_in_vram) + self.adabins_helper = self.adabins_model.adabins_helper + else: + raise Exception(f"Unknown depth_algorithm: {self.depth_algorithm}") + + def predict(self, prev_img_cv2, midas_weight, half_precision) -> torch.Tensor: + + img_pil = 
Image.fromarray(cv2.cvtColor(prev_img_cv2.astype(np.uint8), cv2.COLOR_RGB2BGR)) + + if self.depth_algorithm.lower().startswith('zoe'): + depth_tensor = self.zoe_depth.predict(img_pil).to(self.device) + if self.depth_algorithm.lower() == 'zoe+adabins (old)' and midas_weight < 1.0: + use_adabins, adabins_depth = AdaBinsModel._instance.predict(img_pil, prev_img_cv2) + if use_adabins: # if there was no error in getting the adabins depth, align midas with adabins + depth_tensor = self.blend_and_align_with_adabins(depth_tensor, adabins_depth, midas_weight) + elif self.depth_algorithm.lower() == 'leres': + depth_tensor = self.leres_depth.predict(prev_img_cv2.astype(np.float32) / 255.0) + elif self.depth_algorithm.lower() == 'adabins': + use_adabins, adabins_depth = AdaBinsModel._instance.predict(img_pil, prev_img_cv2) + depth_tensor = torch.tensor(adabins_depth) + if use_adabins is False: + raise Exception("Error getting depth from AdaBins") # TODO: fallback to something else maybe? + elif self.depth_algorithm.lower().startswith('midas'): + depth_tensor = self.midas_depth.predict(prev_img_cv2, half_precision) + if self.depth_algorithm.lower() == 'midas+adabins (old)' and midas_weight < 1.0: + use_adabins, adabins_depth = AdaBinsModel._instance.predict(img_pil, prev_img_cv2) + if use_adabins: # if there was no error in getting the adabins depth, align midas with adabins + depth_tensor = self.blend_and_align_with_adabins(depth_tensor, adabins_depth, midas_weight) + else: # Unknown! + raise Exception(f"Unknown depth_algorithm passed to depth.predict function: {self.depth_algorithm}") + + return depth_tensor + + def blend_and_align_with_adabins(self, depth_tensor, adabins_depth, midas_weight): + depth_tensor = torch.subtract(50.0, depth_tensor) / 19.0 # align midas depth with adabins depth. 
Original alignment code from Disco Diffusion + blended_depth_map = (depth_tensor.cpu().numpy() * midas_weight + adabins_depth * (1.0 - midas_weight)) + depth_tensor = torch.from_numpy(np.expand_dims(blended_depth_map, axis=0)).squeeze().to(self.device) + debug_print(f"Blended Midas Depth with AdaBins Depth") + return depth_tensor + + def to(self, device): + self.device = device + if self.depth_algorithm.lower().startswith('zoe'): + self.zoe_depth.zoe.to(device) + elif self.depth_algorithm.lower() == 'leres': + self.leres_depth.to(device) + elif self.depth_algorithm.lower().startswith('midas'): + self.midas_depth.to(device) + if hasattr(self, 'adabins_model'): + self.adabins_model.to(device) + gc.collect() + torch.cuda.empty_cache() + + def to_image(self, depth: torch.Tensor): + depth = depth.cpu().numpy() + depth = np.expand_dims(depth, axis=0) if len(depth.shape) == 2 else depth + self.depth_min, self.depth_max = min(self.depth_min, depth.min()), max(self.depth_max, depth.max()) + denom = max(1e-8, self.depth_max - self.depth_min) + temp = rearrange((depth - self.depth_min) / denom * 255, 'c h w -> h w c') + return Image.fromarray(repeat(temp, 'h w 1 -> h w c', c=3).astype(np.uint8)) + + def save(self, filename: str, depth: torch.Tensor): + self.to_image(depth).save(filename) + + def delete_model(self): + for attr in ['zoe_depth', 'leres_depth']: + if hasattr(self, attr): + getattr(self, attr).delete() + delattr(self, attr) + + if hasattr(self, 'midas_depth'): + del self.midas_depth + + if hasattr(self, 'adabins_model'): + self.adabins_model.delete_model() + + gc.collect() + torch.cuda.empty_cache() + devices.torch_gc() \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/depth_adabins.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/depth_adabins.py new file mode 100644 index 0000000000000000000000000000000000000000..6b696abadd0a8105db4132b414df9cf3908311fe --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/depth_adabins.py @@ -0,0 +1,78 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
+ +# Contact the authors: https://deforum.github.io/ + +import torch +import numpy as np +from PIL import Image +import torchvision.transforms.functional as TF +from .general_utils import download_file_with_checksum +from infer import InferenceHelper + +class AdaBinsModel: + _instance = None + + def __new__(cls, *args, **kwargs): + keep_in_vram = kwargs.get('keep_in_vram', False) + if cls._instance is None: + cls._instance = super().__new__(cls) + cls._instance._initialize(*args, keep_in_vram=keep_in_vram) + return cls._instance + + def _initialize(self, models_path, keep_in_vram=False): + self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + self.keep_in_vram = keep_in_vram + self.adabins_helper = None + + download_file_with_checksum(url='https://github.com/hithereai/deforum-for-automatic1111-webui/releases/download/AdaBins/AdaBins_nyu.pt', expected_checksum='643db9785c663aca72f66739427642726b03acc6c4c1d3755a4587aa2239962746410d63722d87b49fc73581dbc98ed8e3f7e996ff7b9c0d56d0fbc98e23e41a', dest_folder=models_path, dest_filename='AdaBins_nyu.pt') + + self.adabins_helper = InferenceHelper(models_path=models_path, dataset='nyu', device=self.device) + + def predict(self, img_pil, prev_img_cv2): + w, h = prev_img_cv2.shape[1], prev_img_cv2.shape[0] + adabins_depth = np.array([]) + use_adabins = True + MAX_ADABINS_AREA, MIN_ADABINS_AREA = 500000, 448 * 448 + + image_pil_area, resized = w * h, False + + if image_pil_area not in range(MIN_ADABINS_AREA, MAX_ADABINS_AREA + 1): + scale = ((MAX_ADABINS_AREA if image_pil_area > MAX_ADABINS_AREA else MIN_ADABINS_AREA) / image_pil_area) ** 0.5 + depth_input = img_pil.resize((int(w * scale), int(h * scale)), Image.LANCZOS if image_pil_area > MAX_ADABINS_AREA else Image.BICUBIC) + print(f"AdaBins depth resized to {depth_input.width}x{depth_input.height}") + resized = True + else: + depth_input = img_pil + + try: + with torch.no_grad(): + _, adabins_depth = self.adabins_helper.predict_pil(depth_input) + if resized: + adabins_depth = TF.resize(torch.from_numpy(adabins_depth), torch.Size([h, w]), interpolation=TF.InterpolationMode.BICUBIC).cpu().numpy() + adabins_depth = adabins_depth.squeeze() + except Exception as e: + print("AdaBins exception encountered. Falling back to pure MiDaS/Zoe (only if running in Legacy Midas/Zoe+AdaBins mode)") + use_adabins = False + torch.cuda.empty_cache() + + return use_adabins, adabins_depth + + def to(self, device): + self.device = device + if self.adabins_helper is not None: + self.adabins_helper.to(device) + + def delete_model(self): + del self.adabins_helper diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/depth_leres.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/depth_leres.py new file mode 100644 index 0000000000000000000000000000000000000000..43cd5451e0f8a161c711055eba96268b5ee95ca3 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/depth_leres.py @@ -0,0 +1,71 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +# Contact the authors: https://deforum.github.io/ + +import torch +import cv2 +import os +import numpy as np +import torchvision.transforms as transforms +from .general_utils import download_file_with_checksum +from leres.lib.multi_depth_model_woauxi import RelDepthModel +from leres.lib.net_tools import load_ckpt + +class LeReSDepth: + def __init__(self, width=448, height=448, models_path=None, checkpoint_name='res101.pth', backbone='resnext101'): + self.width = width + self.height = height + self.models_path = models_path + self.checkpoint_name = checkpoint_name + self.backbone = backbone + + download_file_with_checksum(url='https://cloudstor.aarnet.edu.au/plus/s/lTIJF4vrvHCAI31/download', expected_checksum='7fdc870ae6568cb28d56700d0be8fc45541e09cea7c4f84f01ab47de434cfb7463cacae699ad19fe40ee921849f9760dedf5e0dec04a62db94e169cf203f55b1', dest_folder=models_path, dest_filename=self.checkpoint_name) + + self.depth_model = RelDepthModel(backbone=self.backbone) + self.depth_model.eval() + self.DEVICE = "cuda" if torch.cuda.is_available() else "cpu" + self.depth_model.to(self.DEVICE) + load_ckpt(os.path.join(self.models_path, self.checkpoint_name), self.depth_model, None, None) + + @staticmethod + def scale_torch(img): + if len(img.shape) == 2: + img = img[np.newaxis, :, :] + if img.shape[2] == 3: + transform = transforms.Compose([transforms.ToTensor(), + transforms.Normalize((0.485, 0.456, 0.406) , (0.229, 0.224, 0.225))]) + img = transform(img) + else: + img = img.astype(np.float32) + img = torch.from_numpy(img) + return img + + def predict(self, image): + resized_image = cv2.resize(image, (self.width, self.height)) + img_torch = self.scale_torch(resized_image)[None, :, :, :] + pred_depth = self.depth_model.inference(img_torch).cpu().numpy().squeeze() + pred_depth_ori = cv2.resize(pred_depth, (image.shape[1], image.shape[0])) + return torch.from_numpy(pred_depth_ori).unsqueeze(0).to(self.DEVICE) + + def save_raw_depth(self, depth, filepath): + depth_normalized = (depth / depth.max() * 60000).astype(np.uint16) + cv2.imwrite(filepath, depth_normalized) + + def to(self, device): + self.DEVICE = device + self.depth_model = self.depth_model.to(device) + + def delete(self): + del self.depth_model \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/depth_midas.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/depth_midas.py new file mode 100644 index 0000000000000000000000000000000000000000..6af4cdb85769b25f4efd98ad0c0d3b380e6de34d --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/depth_midas.py @@ -0,0 +1,91 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
+ +# Contact the authors: https://deforum.github.io/ + +import os +import cv2 +import torch +import numpy as np +from .general_utils import download_file_with_checksum +from midas.dpt_depth import DPTDepthModel +from midas.transforms import Resize, NormalizeImage, PrepareForNet +import torchvision.transforms as T + +class MidasDepth: + def __init__(self, models_path, device, half_precision=True, midas_model_type='Midas-3-Hybrid'): + if midas_model_type.lower() == 'midas-3.1-beitlarge': + self.midas_model_filename = 'dpt_beit_large_512.pt' + self.midas_model_checksum='66cbb00ea7bccd6e43d3fd277bd21002d8d8c2c5c487e5fcd1e1d70c691688a19122418b3ddfa94e62ab9f086957aa67bbec39afe2b41c742aaaf0699ee50b33' + self.midas_model_url = 'https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_beit_large_512.pt' + self.resize_px = 512 + self.backbone = 'beitl16_512' + else: + self.midas_model_filename = 'dpt_large-midas-2f21e586.pt' + self.midas_model_checksum = 'fcc4829e65d00eeed0a38e9001770676535d2e95c8a16965223aba094936e1316d569563552a852d471f310f83f597e8a238987a26a950d667815e08adaebc06' + self.midas_model_url = 'https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt' + self.resize_px = 384 + self.backbone = 'vitl16_384' + self.device = device + self.normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) + self.midas_transform = T.Compose([ + Resize(self.resize_px, self.resize_px, resize_target=None, keep_aspect_ratio=True, ensure_multiple_of=32, + resize_method="minimal", image_interpolation_method=cv2.INTER_CUBIC), + self.normalization, + PrepareForNet() + ]) + + download_file_with_checksum(url=self.midas_model_url, expected_checksum=self.midas_model_checksum, dest_folder=models_path, dest_filename=self.midas_model_filename) + + self.load_midas_model(models_path, self.midas_model_filename) + if half_precision: + self.midas_model = self.midas_model.half() + + def load_midas_model(self, models_path, midas_model_filename): + model_file = os.path.join(models_path, midas_model_filename) + print(f"Loading MiDaS model from {midas_model_filename}...") + self.midas_model = DPTDepthModel( + path=model_file, + backbone=self.backbone, + non_negative=True, + ) + self.midas_model.eval().to(self.device, memory_format=torch.channels_last if self.device == torch.device("cuda") else None) + + def predict(self, prev_img_cv2, half_precision): + img_midas = prev_img_cv2.astype(np.float32) / 255.0 + img_midas_input = self.midas_transform({"image": img_midas})["image"] + sample = torch.from_numpy(img_midas_input).float().to(self.device).unsqueeze(0) + + if self.device.type == "cuda" or self.device.type == "mps": + sample = sample.to(memory_format=torch.channels_last) + if half_precision: + sample = sample.half() + + with torch.no_grad(): + midas_depth = self.midas_model.forward(sample) + midas_depth = torch.nn.functional.interpolate( + midas_depth.unsqueeze(1), + size=img_midas.shape[:2], + mode="bicubic", + align_corners=False, + ).squeeze().cpu().numpy() + + torch.cuda.empty_cache() + depth_tensor = torch.from_numpy(np.expand_dims(midas_depth, axis=0)).squeeze().to(self.device) + + return depth_tensor + + def to(self, device): + self.device = device + self.midas_model = self.midas_model.to(device, memory_format=torch.channels_last if device == torch.device("cuda") else None) \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/depth_zoe.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/depth_zoe.py new file mode 
100644 index 0000000000000000000000000000000000000000..bb6eda25be7954c5510d08181b61359ed33181ca --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/depth_zoe.py @@ -0,0 +1,46 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +# Contact the authors: https://deforum.github.io/ + +import torch +from zoedepth.models.builder import build_model +from zoedepth.utils.config import get_config + +class ZoeDepth: + def __init__(self, width=512, height=512): + conf = get_config("zoedepth_nk", "infer") + conf.img_size = [width, height] + self.model_zoe = build_model(conf) + self.DEVICE = "cuda" if torch.cuda.is_available() else "cpu" + self.zoe = self.model_zoe.to(self.DEVICE) + self.width = width + self.height = height + + def predict(self, image): + self.zoe.core.prep.resizer._Resize__width = self.width + self.zoe.core.prep.resizer._Resize__height = self.height + depth_tensor = self.zoe.infer_pil(image, output_type="tensor") + return depth_tensor + + def to(self, device): + self.DEVICE = device + self.zoe = self.model_zoe.to(device) + + def save_raw_depth(self, depth, filepath): + depth.save(filepath, format='PNG', mode='I;16') + + def delete(self): + del self.model_zoe + del self.zoe \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/frame_interpolation.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/frame_interpolation.py new file mode 100644 index 0000000000000000000000000000000000000000..d46556866315d42b4c6ad7e4ee6c017e5902d45c --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/frame_interpolation.py @@ -0,0 +1,240 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
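#
# A short, illustrative sketch of the ZoeDepth wrapper from depth_zoe.py above
# (the image path is hypothetical, and the `zoedepth` package with its
# zoedepth_nk weights must be importable for build_model() to succeed). Note that
# predict() pokes the name-mangled _Resize__width/_Resize__height attributes so
# the preprocessor resizes to the width/height the wrapper was built with.
#
#     from PIL import Image
#     zoe = ZoeDepth(width=512, height=512)         # builds zoedepth_nk, on CUDA if available
#     img = Image.open('frame.png').convert('RGB')
#     depth = zoe.predict(img)                      # HxW depth tensor
#     zoe.to('cpu')                                 # optionally move the model off the GPU
#     zoe.delete()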
+ +# Contact the authors: https://deforum.github.io/ + +import os +from pathlib import Path +from rife.inference_video import run_rife_new_video_infer +from .video_audio_utilities import get_quick_vid_info, vid2frames, media_file_has_audio, extract_number, ffmpeg_stitch_video +from film_interpolation.film_inference import run_film_interp_infer +from .general_utils import duplicate_pngs_from_folder, checksum, convert_images_from_list +from modules.shared import opts + +DEBUG_MODE = opts.data.get("deforum_debug_mode_enabled", False) + +# gets 'RIFE v4.3', returns: 'RIFE43' +def extract_rife_name(string): + parts = string.split() + if len(parts) != 2 or parts[0] != "RIFE" or (parts[1][0] != "v" or not parts[1][1:].replace('.','').isdigit()): + raise ValueError("Input string should contain exactly 2 words, first word should be 'RIFE' and second word should start with 'v' followed by 2 numbers") + return "RIFE"+parts[1][1:].replace('.','') + +# This function usually gets a filename, and converts it to a legal linux/windows *folder* name +def clean_folder_name(string): + illegal_chars = "/\\<>:\"|?*.,\" " + translation_table = str.maketrans(illegal_chars, "_"*len(illegal_chars)) + return string.translate(translation_table) + +def set_interp_out_fps(interp_x, slow_x_enabled, slom_x, in_vid_fps): + if interp_x == 'Disabled' or in_vid_fps in ('---', None, '', 'None'): + return '---' + + fps = float(in_vid_fps) * int(interp_x) + # if slom_x != -1: + if slow_x_enabled: + fps /= int(slom_x) + return int(fps) if fps.is_integer() else fps + +# get uploaded video frame count, fps, and return 3 valuees for the gradio UI: in fcount, in fps, out fps (using the set_interp_out_fps function above) +def gradio_f_interp_get_fps_and_fcount(vid_path, interp_x, slow_x_enabled, slom_x): + if vid_path is None: + return '---', '---', '---' + fps, fcount, resolution = get_quick_vid_info(vid_path.name) + expected_out_fps = set_interp_out_fps(interp_x, slow_x_enabled, slom_x, fps) + return (str(round(fps,2)) if fps is not None else '---', (round(fcount,2)) if fcount is not None else '---', round(expected_out_fps,2)) + +# handle call to interpolate an uploaded video from gradio button in args.py (the function that calls this func is named 'upload_vid_to_rife') +def process_interp_vid_upload_logic(file, engine, x_am, sl_enabled, sl_am, keep_imgs, f_location, f_crf, f_preset, in_vid_fps, f_models_path, vid_file_name): + + print("got a request to *frame interpolate* an existing video.") + + _, _, resolution = get_quick_vid_info(file.name) + folder_name = clean_folder_name(Path(vid_file_name).stem) + outdir = opts.outdir_samples or os.path.join(os.getcwd(), 'outputs') + outdir_no_tmp = outdir + f'/frame-interpolation/{folder_name}' + i = 1 + while os.path.exists(outdir_no_tmp): + outdir_no_tmp = f"{outdir}/frame-interpolation/{folder_name}_{i}" + i += 1 + + outdir = os.path.join(outdir_no_tmp, 'tmp_input_frames') + os.makedirs(outdir, exist_ok=True) + + vid2frames(video_path=file.name, video_in_frame_path=outdir, overwrite=True, extract_from_frame=0, extract_to_frame=-1, numeric_files_output=True, out_img_format='png') + + # check if the uploaded vid has an audio stream. If it doesn't, set audio param to None so that ffmpeg won't try to add non-existing audio to final video. 
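    # For reference, the small helpers defined above behave like this
    # (the inputs are illustrative values, not taken from an actual run):
    #
    #     extract_rife_name('RIFE v4.3')           ->  'RIFE43'
    #     clean_folder_name('my clip v1.2.mp4')    ->  'my_clip_v1_2_mp4'
    #     set_interp_out_fps(2, False, 2, 25)      ->  50    (25 fps in, 2x interpolation)
    #     set_interp_out_fps(3, True, 2, 30)       ->  45    (30 * 3, then halved by slow-mo x2)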
+ audio_file_to_pass = None + if media_file_has_audio(file.name, f_location): + audio_file_to_pass = file.name + + process_video_interpolation(frame_interpolation_engine=engine, frame_interpolation_x_amount=x_am, frame_interpolation_slow_mo_enabled = sl_enabled,frame_interpolation_slow_mo_amount=sl_am, orig_vid_fps=in_vid_fps, deforum_models_path=f_models_path, real_audio_track=audio_file_to_pass, raw_output_imgs_path=outdir, img_batch_id=None, ffmpeg_location=f_location, ffmpeg_crf=f_crf, ffmpeg_preset=f_preset, keep_interp_imgs=keep_imgs, orig_vid_name=folder_name, resolution=resolution) + +# handle params before talking with the actual interpolation module (rifee/film, more to be added) +def process_video_interpolation(frame_interpolation_engine, frame_interpolation_x_amount, frame_interpolation_slow_mo_enabled, frame_interpolation_slow_mo_amount, orig_vid_fps, deforum_models_path, real_audio_track, raw_output_imgs_path, img_batch_id, ffmpeg_location, ffmpeg_crf, ffmpeg_preset, keep_interp_imgs, orig_vid_name, resolution, dont_change_fps=False, srt_path=None): + + is_random_pics_run = dont_change_fps + fps = float(orig_vid_fps) * (1 if is_random_pics_run else frame_interpolation_x_amount) + fps /= int(frame_interpolation_slow_mo_amount) if frame_interpolation_slow_mo_enabled and not is_random_pics_run else 1 + + # disable audio-adding by setting real_audio_track to None if slow-mo is enabled + if real_audio_track is not None and frame_interpolation_slow_mo_enabled: + real_audio_track = None + + # disable subtitles by setting srt_path to None if slow-mo is enabled' + if srt_path is not None and frame_interpolation_slow_mo_enabled: + srt_path = None + + if frame_interpolation_engine == 'None': + return + elif frame_interpolation_engine.startswith("RIFE"): + # make sure interp_x is valid and in range + if frame_interpolation_x_amount not in range(2, 11): + raise Error("frame_interpolation_x_amount must be between 2x and 10x") + + # set UHD to True if res' is 2K or higher + if resolution: + UHD = resolution[0] >= 2048 and resolution[1] >= 2048 + else: + UHD = False + # e.g from "RIFE v2.3 to RIFE23" + actual_model_folder_name = extract_rife_name(frame_interpolation_engine) + + # run actual rife interpolation and video stitching etc - the whole suite + return run_rife_new_video_infer(interp_x_amount=frame_interpolation_x_amount, slow_mo_enabled = frame_interpolation_slow_mo_enabled, slow_mo_x_amount=frame_interpolation_slow_mo_amount, model=actual_model_folder_name, fps=fps, deforum_models_path=deforum_models_path, audio_track=real_audio_track, raw_output_imgs_path=raw_output_imgs_path, img_batch_id=img_batch_id, ffmpeg_location=ffmpeg_location, ffmpeg_crf=ffmpeg_crf, ffmpeg_preset=ffmpeg_preset, keep_imgs=keep_interp_imgs, orig_vid_name=orig_vid_name, UHD=UHD, srt_path=srt_path) + elif frame_interpolation_engine == 'FILM': + return prepare_film_inference(deforum_models_path=deforum_models_path, x_am=frame_interpolation_x_amount, sl_enabled=frame_interpolation_slow_mo_enabled, sl_am=frame_interpolation_slow_mo_amount, keep_imgs=keep_interp_imgs, raw_output_imgs_path=raw_output_imgs_path, img_batch_id=img_batch_id, f_location=ffmpeg_location, f_crf=ffmpeg_crf, f_preset=ffmpeg_preset, fps=fps, audio_track=real_audio_track, orig_vid_name=orig_vid_name, is_random_pics_run=is_random_pics_run, srt_path=srt_path) + else: + print("Unknown Frame Interpolation engine chosen. 
Doing nothing.") + return None + +def prepare_film_inference(deforum_models_path, x_am, sl_enabled, sl_am, keep_imgs, raw_output_imgs_path, img_batch_id, f_location, f_crf, f_preset, fps, audio_track, orig_vid_name, is_random_pics_run, srt_path=None): + import shutil + + parent_folder = os.path.dirname(raw_output_imgs_path) + grandparent_folder = os.path.dirname(parent_folder) + if orig_vid_name is not None: + interp_vid_path = os.path.join(parent_folder, str(orig_vid_name) +'_FILM_x' + str(x_am)) + else: + interp_vid_path = os.path.join(raw_output_imgs_path, str(img_batch_id) +'_FILM_x' + str(x_am)) + + film_model_name = 'film_net_fp16.pt' + film_model_folder = os.path.join(deforum_models_path,'film_interpolation') + film_model_path = os.path.join(film_model_folder, film_model_name) # actual full path to the film .pt model file + output_interp_imgs_folder = os.path.join(raw_output_imgs_path, 'interpolated_frames_film') + # set custom name depending on if we interpolate after a run, or interpolate a video (related/unrelated to deforum, we don't know) directly from within the interpolation tab + # interpolated_path = os.path.join(args.raw_output_imgs_path, 'interpolated_frames_rife') + if orig_vid_name is not None: # interpolating a video/ set of pictures (deforum or unrelated) + custom_interp_path = "{}_{}".format(output_interp_imgs_folder, orig_vid_name) + else: # interpolating after a deforum run: + custom_interp_path = "{}_{}".format(output_interp_imgs_folder, img_batch_id) + + # interp_vid_path = os.path.join(raw_output_imgs_path, str(img_batch_id) + '_FILM_x' + str(x_am)) + img_path_for_ffmpeg = os.path.join(custom_interp_path, "frame_%09d.png") + + if sl_enabled: + interp_vid_path = interp_vid_path + '_slomo_x' + str(sl_am) + interp_vid_path = interp_vid_path + '.mp4' + + # In this folder we temporarily keep the original frames (converted/ copy-pasted and img format depends on scenario) + temp_convert_raw_png_path = os.path.join(raw_output_imgs_path, "tmp_film_folder") + if is_random_pics_run: # pass dummy so it just copy-paste the imgs instead of re-writing them + total_frames = duplicate_pngs_from_folder(raw_output_imgs_path, temp_convert_raw_png_path, img_batch_id, 'DUMMY') + else: #re-write pics as png to avert a problem with 24 and 32 mixed outputs from the same animation run + total_frames = duplicate_pngs_from_folder(raw_output_imgs_path, temp_convert_raw_png_path, img_batch_id, None) + check_and_download_film_model('film_net_fp16.pt', film_model_folder) # TODO: split this part + + # get number of in-between-frames to provide to FILM - mimics how RIFE works, we should get the same amount of total frames in the end + film_in_between_frames_count = calculate_frames_to_add(total_frames, x_am) + # Run actual FILM inference + run_film_interp_infer( + model_path = film_model_path, + input_folder = temp_convert_raw_png_path, + save_folder = custom_interp_path, # output folder is created in the infer part + inter_frames = film_in_between_frames_count) + + add_soundtrack = 'None' + if not audio_track is None: + add_soundtrack = 'File' + + print (f"*Passing interpolated frames to ffmpeg...*") + exception_raised = False + try: + ffmpeg_stitch_video(ffmpeg_location=f_location, fps=fps, outmp4_path=interp_vid_path, stitch_from_frame=0, stitch_to_frame=999999999, imgs_path=img_path_for_ffmpeg, add_soundtrack=add_soundtrack, audio_path=audio_track, crf=f_crf, preset=f_preset, srt_path=srt_path) + except Exception as e: + exception_raised = True + print(f"An error occurred while stitching the 
video: {e}") + + if orig_vid_name and (keep_imgs or exception_raised): + shutil.move(custom_interp_path, parent_folder) + if not keep_imgs and not exception_raised: + if fps <= 450: # keep interp frames automatically if out_vid fps is above 450 + shutil.rmtree(custom_interp_path, ignore_errors=True) + # delete duplicated raw non-interpolated frames + shutil.rmtree(temp_convert_raw_png_path, ignore_errors=True) + # remove folder with raw (non-interpolated) vid input frames in case of input VID and not PNGs + if orig_vid_name: + shutil.rmtree(raw_output_imgs_path, ignore_errors=True) + + return interp_vid_path + +def check_and_download_film_model(model_name, model_dest_folder): + from basicsr.utils.download_util import load_file_from_url + if model_name == 'film_net_fp16.pt': + model_dest_path = os.path.join(model_dest_folder, model_name) + download_url = 'https://github.com/hithereai/frame-interpolation-pytorch/releases/download/film_net_fp16.pt/film_net_fp16.pt' + film_model_hash = '0a823815b111488ac2b7dd7fe6acdd25d35a22b703e8253587764cf1ee3f8f93676d24154d9536d2ce5bc3b2f102fb36dfe0ca230dfbe289d5cd7bde5a34ec12' + else: # Unknown FILM model + raise Exception("Got a request to download an unknown FILM model. Can't proceed.") + if os.path.exists(model_dest_path): + return + try: + os.makedirs(model_dest_folder, exist_ok=True) + # download film model from url + load_file_from_url(download_url, model_dest_folder) + # verify checksum + if checksum(model_dest_path) != film_model_hash: + raise Exception(f"Error while downloading {model_name}. Please download from: {download_url}, and put in: {model_dest_folder}") + except Exception as e: + raise Exception(f"Error while downloading {model_name}. Please download from: {download_url}, and put in: {model_dest_folder}") + +# get film no. of frames to add after each pic from tot frames in interp_x values +def calculate_frames_to_add(total_frames, interp_x): + frames_to_add = (total_frames * interp_x - total_frames) / (total_frames - 1) + return int(round(frames_to_add)) + +def process_interp_pics_upload_logic(pic_list, engine, x_am, sl_enabled, sl_am, keep_imgs, f_location, f_crf, f_preset, fps, f_models_path, resolution, add_soundtrack, audio_track): + pic_path_list = [pic.name for pic in pic_list] + print(f"got a request to *frame interpolate* a set of {len(pic_list)} images.") + folder_name = clean_folder_name(Path(pic_list[0].orig_name).stem) + outdir_no_tmp = os.path.join(os.getcwd(), 'outputs', 'frame-interpolation', folder_name) + i = 1 + while os.path.exists(outdir_no_tmp): + outdir_no_tmp = os.path.join(os.getcwd(), 'outputs', 'frame-interpolation', folder_name + '_' + str(i)) + i += 1 + + outdir = os.path.join(outdir_no_tmp, 'tmp_input_frames') + os.makedirs(outdir, exist_ok=True) + + convert_images_from_list(paths=pic_path_list, output_dir=outdir,format='png') + + audio_file_to_pass = None + # todo? add handling of vid input sound? if needed at all... + if add_soundtrack == 'File': + audio_file_to_pass = audio_track + # todo: upgrade function so it takes url and check if audio really exist before passing? not crucial as ffmpeg sofly fallbacks if needed + # if media_file_has_audio(audio_track, f_location): + + # pass param so it won't duplicate the images at all as we already do it in here?! 
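    # A worked example of calculate_frames_to_add() from above, which mimics how
    # RIFE multiplies the frame count: with 100 source frames and interp_x = 3,
    # frames_to_add = (100*3 - 100) / (100 - 1) = 200/99, about 2.02, rounded to 2
    # in-between frames per pair, so FILM ends up with 100 + 2*99 = 298, close to 300.
    #
    #     calculate_frames_to_add(total_frames=100, interp_x=3)   ->  2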
+ process_video_interpolation(frame_interpolation_engine=engine, frame_interpolation_x_amount=x_am, frame_interpolation_slow_mo_enabled = sl_enabled,frame_interpolation_slow_mo_amount=sl_am, orig_vid_fps=fps, deforum_models_path=f_models_path, real_audio_track=audio_file_to_pass, raw_output_imgs_path=outdir, img_batch_id=None, ffmpeg_location=f_location, ffmpeg_crf=f_crf, ffmpeg_preset=f_preset, keep_interp_imgs=keep_imgs, orig_vid_name=folder_name, resolution=resolution, dont_change_fps=True) \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/general_utils.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/general_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..db94989ab34d63438ec9688f074fd7987c9f38b0 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/general_utils.py @@ -0,0 +1,144 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +# Contact the authors: https://deforum.github.io/ + +import os +import shutil +import hashlib +from modules.shared import opts +from basicsr.utils.download_util import load_file_from_url + +def debug_print(message): + DEBUG_MODE = opts.data.get("deforum_debug_mode_enabled", False) + if DEBUG_MODE: + print(message) + +def checksum(filename, hash_factory=hashlib.blake2b, chunk_num_blocks=128): + h = hash_factory() + with open(filename,'rb') as f: + while chunk := f.read(chunk_num_blocks*h.block_size): + h.update(chunk) + return h.hexdigest() + +def get_os(): + import platform + return {"Windows": "Windows", "Linux": "Linux", "Darwin": "Mac"}.get(platform.system(), "Unknown") + +# used in src/rife/inference_video.py and more, soon +def duplicate_pngs_from_folder(from_folder, to_folder, img_batch_id, orig_vid_name): + import cv2 + #TODO: don't copy-paste at all if the input is a video (now it copy-pastes, and if input is deforum run is also converts to make sure no errors rise cuz of 24-32 bit depth differences) + temp_convert_raw_png_path = os.path.join(from_folder, to_folder) + os.makedirs(temp_convert_raw_png_path, exist_ok=True) + + frames_handled = 0 + for f in os.listdir(from_folder): + if ('png' in f or 'jpg' in f) and '-' not in f and '_depth_' not in f and ((img_batch_id is not None and f.startswith(img_batch_id) or img_batch_id is None)): + frames_handled +=1 + original_img_path = os.path.join(from_folder, f) + if orig_vid_name is not None: + shutil.copy(original_img_path, temp_convert_raw_png_path) + else: + image = cv2.imread(original_img_path) + new_path = os.path.join(temp_convert_raw_png_path, f) + cv2.imwrite(new_path, image, [cv2.IMWRITE_PNG_COMPRESSION, 0]) + return frames_handled + +def convert_images_from_list(paths, output_dir, format): + import os + from PIL import Image + # Ensure that the output directory exists + os.makedirs(output_dir, exist_ok=True) + + # Loop over all input images + for i, path in enumerate(paths): + # Open the image + with 
Image.open(path) as img: + # Generate the output filename + filename = f"{i+1:09d}.{format}" + # Save the image to the output directory + img.save(os.path.join(output_dir, filename)) + +def get_deforum_version(): + from modules import extensions as mext + try: + for ext in mext.extensions: + if ext.name in ["deforum", "deforum-for-automatic1111-webui", "sd-webui-deforum"] and ext.enabled: + ext.read_info_from_repo() # need this call to get exten info on ui-launch, not to be removed + return ext.version + return "Unknown" + except: + return "Unknown" + +def custom_placeholder_format(value_dict, placeholder_match): + key = placeholder_match.group(1).lower() + value = value_dict.get(key, key) or "_" + if isinstance(value, dict) and value: + first_key = list(value.keys())[0] + value = str(value[first_key][0]) if isinstance(value[first_key], list) and value[first_key] else str(value[first_key]) + return str(value)[:50] + +def test_long_path_support(base_folder_path): + long_folder_name = 'A' * 300 + long_path = os.path.join(base_folder_path, long_folder_name) + try: + os.makedirs(long_path) + shutil.rmtree(long_path) + return True + except OSError: + return False + +def get_max_path_length(base_folder_path): + if get_os() == 'Windows': + return (32767 if test_long_path_support(base_folder_path) else 260) - len(base_folder_path) - 1 + return 4096 - len(base_folder_path) - 1 + +def substitute_placeholders(template, arg_list, base_folder_path): + import re + # Find and update timestring values if resume_from_timestring is True + resume_from_timestring = next((arg_obj.resume_from_timestring for arg_obj in arg_list if hasattr(arg_obj, 'resume_from_timestring')), False) + resume_timestring = next((arg_obj.resume_timestring for arg_obj in arg_list if hasattr(arg_obj, 'resume_timestring')), None) + + if resume_from_timestring and resume_timestring: + for arg_obj in arg_list: + if hasattr(arg_obj, 'timestring'): + arg_obj.timestring = resume_timestring + + max_length = get_max_path_length(base_folder_path) + values = {attr.lower(): getattr(arg_obj, attr) + for arg_obj in arg_list + for attr in dir(arg_obj) if not callable(getattr(arg_obj, attr)) and not attr.startswith('__')} + formatted_string = re.sub(r"{(\w+)}", lambda m: custom_placeholder_format(values, m), template) + formatted_string = re.sub(r'[<>:"/\\|?*\s,]', '_', formatted_string) + return formatted_string[:max_length] + +def count_files_in_folder(folder_path): + import glob + file_pattern = folder_path + "/*" + file_count = len(glob.glob(file_pattern)) + return file_count + +def clean_gradio_path_strings(input_str): + if isinstance(input_str, str) and input_str.startswith('"') and input_str.endswith('"'): + return input_str[1:-1] + else: + return input_str + +def download_file_with_checksum(url, expected_checksum, dest_folder, dest_filename): + expected_full_path = os.path.join(dest_folder, dest_filename) + if not os.path.exists(expected_full_path) and not os.path.isdir(expected_full_path): + load_file_from_url(url=url, model_dir=dest_folder, file_name=dest_filename, progress=True) + if checksum(expected_full_path) != expected_checksum: + raise Exception(f"Error while downloading {dest_filename}.]nPlease manually download from: {url}\nAnd place it in: {dest_folder}") \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/generate.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/generate.py new file mode 100644 index 
0000000000000000000000000000000000000000..00ea3e58dbfeffd962b7d0f04944924a3891eb2d --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/generate.py @@ -0,0 +1,368 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +# Contact the authors: https://deforum.github.io/ + +from PIL import Image +import math +import json +import itertools +import requests +import numexpr +from modules import processing, sd_models +from modules.shared import sd_model, state, cmd_opts +from .deforum_controlnet import is_controlnet_enabled, process_with_controlnet +from .prompt import split_weighted_subprompts +from .load_images import load_img, prepare_mask, check_mask_for_errors +from .webui_sd_pipeline import get_webui_sd_pipeline +from .rich import console +from .defaults import get_samplers_list +from .prompt import check_is_number +import cv2 +import numpy as np +from types import SimpleNamespace + +from .general_utils import debug_print + +def load_mask_latent(mask_input, shape): + # mask_input (str or PIL Image.Image): Path to the mask image or a PIL Image object + # shape (list-like len(4)): shape of the image to match, usually latent_image.shape + + if isinstance(mask_input, str): # mask input is probably a file name + if mask_input.startswith('http://') or mask_input.startswith('https://'): + mask_image = Image.open(requests.get(mask_input, stream=True).raw).convert('RGBA') + else: + mask_image = Image.open(mask_input).convert('RGBA') + elif isinstance(mask_input, Image.Image): + mask_image = mask_input + else: + raise Exception("mask_input must be a PIL image or a file name") + + mask_w_h = (shape[-1], shape[-2]) + mask = mask_image.resize(mask_w_h, resample=Image.LANCZOS) + mask = mask.convert("L") + return mask + +def isJson(myjson): + try: + json.loads(myjson) + except ValueError as e: + return False + return True + +# Add pairwise implementation here not to upgrade +# the whole python to 3.10 just for one function +def pairwise_repl(iterable): + a, b = itertools.tee(iterable) + next(b, None) + return zip(a, b) + +def generate(args, keys, anim_args, loop_args, controlnet_args, root, parseq_adapter, frame=0, sampler_name=None): + if state.interrupted: + return None + + if args.reroll_blank_frames == 'ignore': + return generate_inner(args, keys, anim_args, loop_args, controlnet_args, root, parseq_adapter, frame, sampler_name) + + image, caught_vae_exception = generate_with_nans_check(args, keys, anim_args, loop_args, controlnet_args, root, parseq_adapter, frame, sampler_name) + + if caught_vae_exception or not image.getbbox(): + patience = args.reroll_patience + print("Blank frame detected! 
If you don't have the NSFW filter enabled, this may be due to a glitch!") + if args.reroll_blank_frames == 'reroll': + while caught_vae_exception or not image.getbbox(): + print("Rerolling with +1 seed...") + args.seed += 1 + image, caught_vae_exception = generate_with_nans_check(args, keys, anim_args, loop_args, controlnet_args, root, parseq_adapter, frame, sampler_name) + patience -= 1 + if patience == 0: + print("Rerolling with +1 seed failed for 10 iterations! Try setting webui's precision to 'full' and if it fails, please report this to the devs! Interrupting...") + state.interrupted = True + state.assign_current_image(image) + return None + elif args.reroll_blank_frames == 'interrupt': + print("Interrupting to save your eyes...") + state.interrupted = True + state.assign_current_image(image) + return None + return image + +def generate_with_nans_check(args, keys, anim_args, loop_args, controlnet_args, root, parseq_adapter, frame=0, sampler_name=None): + if cmd_opts.disable_nan_check: + image = generate_inner(args, keys, anim_args, loop_args, controlnet_args, root, parseq_adapter, frame, sampler_name) + else: + try: + image = generate_inner(args, keys, anim_args, loop_args, controlnet_args, root, parseq_adapter, frame, sampler_name) + except Exception as e: + if "A tensor with all NaNs was produced in VAE." in repr(e): + print(e) + return None, True + else: + raise e + return image, False + +def generate_inner(args, keys, anim_args, loop_args, controlnet_args, root, parseq_adapter, frame=0, sampler_name=None): + # Setup the pipeline + p = get_webui_sd_pipeline(args, root) + p.prompt, p.negative_prompt = split_weighted_subprompts(args.prompt, frame, anim_args.max_frames) + + if not args.use_init and args.strength > 0 and args.strength_0_no_init: + args.strength = 0 + processed = None + mask_image = None + init_image = None + image_init0 = None + image_init0_box = None + + if loop_args.use_looper and anim_args.animation_mode in ['2D', '3D']: + + debug_print(f"Looper: use_looper={loop_args.use_looper}, imageStrength={loop_args.imageStrength}, blendFactorMax={loop_args.blendFactorMax}, blendFactorSlope={loop_args.blendFactorSlope}, tweeningFrames={loop_args.tweeningFrameSchedule}, colorCorrectionFactor={loop_args.colorCorrectionFactor}") + args.strength = loop_args.imageStrength + tweeningFrames = loop_args.tweeningFrameSchedule + blendFactor = .07 + colorCorrectionFactor = loop_args.colorCorrectionFactor + jsonImages = json.loads(loop_args.imagesToKeyframe) + # find which image to show + parsedImages = {} + frameToChoose = 0 + max_f = anim_args.max_frames - 1 + + for key, value in jsonImages.items(): + if check_is_number(key): # default case 0:(1 + t %5), 30:(5-t%2) + parsedImages[key] = value + else: # math on the left hand side case 0:(1 + t %5), maxKeyframes/2:(5-t%2) + parsedImages[int(numexpr.evaluate(key))] = value + + framesToImageSwapOn = list(map(int, list(parsedImages.keys()))) + + for swappingFrame in framesToImageSwapOn[1:]: + frameToChoose += (frame >= int(swappingFrame)) + + # find which frame to do our swapping on for tweening + skipFrame = 25 + for fs, fe in pairwise_repl(framesToImageSwapOn): + if fs <= frame <= fe: + skipFrame = fe - fs + + if frame % skipFrame <= tweeningFrames: # number of tweening frames + blendFactor = loop_args.blendFactorMax - loop_args.blendFactorSlope * math.cos((frame % tweeningFrames) / (tweeningFrames / 2)) + init_image2, _ = load_img(list(jsonImages.values())[frameToChoose], + None, # init_image_box not used in this case + shape=(args.W, 
args.H), + use_alpha_as_mask=args.use_alpha_as_mask) + image_init0 = list(jsonImages.values())[0] + + else: # they passed in a single init image + image_init0 = args.init_image + image_init0_box = args.init_image_box + + available_samplers = get_samplers_list() + if sampler_name is not None: + if sampler_name in available_samplers.keys(): + p.sampler_name = available_samplers[sampler_name] + else: + raise RuntimeError(f"Sampler name '{sampler_name}' is invalid. Please check the available sampler list in the 'Run' tab") + + if args.checkpoint is not None: + info = sd_models.get_closet_checkpoint_match(args.checkpoint) + if info is None: + raise RuntimeError(f"Unknown checkpoint: {args.checkpoint}") + sd_models.reload_model_weights(info=info) + + if root.init_sample is not None: + # TODO: cleanup init_sample remains later + img = root.init_sample + init_image = img + if loop_args.use_looper and isJson(loop_args.imagesToKeyframe) and anim_args.animation_mode in ['2D', '3D']: + init_image = Image.blend(init_image, init_image2, blendFactor) + correction_colors = Image.blend(init_image, init_image2, colorCorrectionFactor) + p.color_corrections = [processing.setup_color_correction(correction_colors)] + + # this is the first pass + elif (loop_args.use_looper and anim_args.animation_mode in ['2D', '3D']) or (args.use_init and ((args.init_image != None and args.init_image != '') or args.init_image_box != None)): + init_image, mask_image = load_img(image_init0, # initial init image + image_init0_box, # initial init image from box (if single init image is used, not json list) + shape=(args.W, args.H), + use_alpha_as_mask=args.use_alpha_as_mask) + + else: + + if anim_args.animation_mode != 'Interpolation': + print(f"Not using an init image (doing pure txt2img)") + + if args.motion_preview_mode: + state.assign_current_image(root.default_img) + processed = SimpleNamespace(images = [root.default_img], info = "Generating motion preview...") + else: + p_txt = processing.StableDiffusionProcessingTxt2Img( + sd_model=sd_model, + outpath_samples=root.tmp_deforum_run_duplicated_folder, + outpath_grids=root.tmp_deforum_run_duplicated_folder, + prompt=p.prompt, + styles=p.styles, + negative_prompt=p.negative_prompt, + seed=p.seed, + subseed=p.subseed, + subseed_strength=p.subseed_strength, + seed_resize_from_h=p.seed_resize_from_h, + seed_resize_from_w=p.seed_resize_from_w, + sampler_name=p.sampler_name, + batch_size=p.batch_size, + n_iter=p.n_iter, + steps=p.steps, + cfg_scale=p.cfg_scale, + width=p.width, + height=p.height, + restore_faces=p.restore_faces, + tiling=p.tiling, + enable_hr=False, + denoising_strength=0, + ) + + print_combined_table(args, anim_args, p_txt, keys, frame) # print dynamic table to cli + + if is_controlnet_enabled(controlnet_args): + process_with_controlnet(p_txt, args, anim_args, controlnet_args, root, parseq_adapter, is_img2img=False, frame_idx=frame) + + processed = processing.process_images(p_txt) + + try: + p_txt.close() + except Exception as e: + ... 
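    # pairwise_repl(), used in the looper block above, is the classic itertools
    # "pairwise" recipe (kept local so Python 3.10 isn't required):
    #
    #     list(pairwise_repl([0, 30, 60, 90]))  ->  [(0, 30), (30, 60), (60, 90)]
    #
    # which is how the loop finds the keyframe interval (fs, fe) that the current
    # frame falls into when picking the tweening window.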
+ + if processed is None: + # Mask functions + if args.use_mask: + mask_image = args.mask_image + mask = prepare_mask(args.mask_file if mask_image is None else mask_image, + (args.W, args.H), + args.mask_contrast_adjust, + args.mask_brightness_adjust) + p.inpainting_mask_invert = args.invert_mask + p.inpainting_fill = args.fill + p.inpaint_full_res = args.full_res_mask + p.inpaint_full_res_padding = args.full_res_mask_padding + # prevent loaded mask from throwing errors in Image operations if completely black and crop and resize in webui pipeline + # doing this after contrast and brightness adjustments to ensure that mask is not passed as black or blank + mask = check_mask_for_errors(mask, args.invert_mask) + root.noise_mask = mask + else: + mask = None + + assert not ((mask is not None and args.use_mask and args.overlay_mask) and ( + root.init_sample is None and init_image is None)), "Need an init image when use_mask == True and overlay_mask == True" + + p.init_images = [init_image] + p.image_mask = mask + p.image_cfg_scale = args.pix2pix_img_cfg_scale + + print_combined_table(args, anim_args, p, keys, frame) # print dynamic table to cli + + if args.motion_preview_mode: + processed = mock_process_images(args, p, init_image) + else: + if is_controlnet_enabled(controlnet_args): + process_with_controlnet(p, args, anim_args, controlnet_args, root, parseq_adapter, is_img2img=True, frame_idx=frame) + + processed = processing.process_images(p) + + + if root.initial_info is None: + root.initial_info = processed.info + + if root.first_frame is None: + root.first_frame = processed.images[0] + + results = processed.images[0] + + return results + +# Run this instead of actual diffusion when doing motion preview. +def mock_process_images(args, p, init_image): + + input_image = cv2.cvtColor(np.array(init_image), cv2.COLOR_RGB2BGR) + + start_point = (int(args.H/3), int(args.W/3)) + end_point = (int(args.H-args.H/3), int(args.W-args.W/3)) + color = (255, 255, 255, float(p.denoising_strength)) + thickness = 2 + mock_generated_image = np.zeros_like(input_image, np.uint8) + cv2.rectangle(mock_generated_image, start_point, end_point, color, thickness) + + + blend = cv2.addWeighted(input_image, float(1.0-p.denoising_strength), mock_generated_image, float(p.denoising_strength), 0) + + image = Image.fromarray(cv2.cvtColor(blend, cv2.COLOR_BGR2RGB)) + state.assign_current_image(image) + return SimpleNamespace(images = [image], info = "Generating motion preview...") + +def print_combined_table(args, anim_args, p, keys, frame_idx): + from rich.table import Table + from rich import box + + table = Table(padding=0, box=box.ROUNDED) + + field_names1 = ["Steps", "CFG"] + if anim_args.animation_mode != 'Interpolation': + field_names1.append("Denoise") + field_names1 += ["Subseed", "Subs. 
str"] * (anim_args.enable_subseed_scheduling) + field_names1 += ["Sampler"] * anim_args.enable_sampler_scheduling + field_names1 += ["Checkpoint"] * anim_args.enable_checkpoint_scheduling + + for field_name in field_names1: + table.add_column(field_name, justify="center") + + rows1 = [str(p.steps), str(p.cfg_scale)] + if anim_args.animation_mode != 'Interpolation': + rows1.append(f"{p.denoising_strength:.5g}" if p.denoising_strength is not None else "None") + + rows1 += [str(p.subseed), f"{p.subseed_strength:.5g}"] * anim_args.enable_subseed_scheduling + rows1 += [p.sampler_name] * anim_args.enable_sampler_scheduling + rows1 += [str(args.checkpoint)] * anim_args.enable_checkpoint_scheduling + + rows2 = [] + if anim_args.animation_mode not in ['Video Input', 'Interpolation']: + if anim_args.animation_mode == '2D': + field_names2 = ["Angle", "Zoom", "Tr C X", "Tr C Y"] + else: + field_names2 = [] + field_names2 += ["Tr X", "Tr Y"] + if anim_args.animation_mode == '3D': + field_names2 += ["Tr Z", "Ro X", "Ro Y", "Ro Z"] + if anim_args.aspect_ratio_schedule.replace(" ", "") != '0:(1)': + field_names2 += ["Asp. Ratio"] + if anim_args.enable_perspective_flip: + field_names2 += ["Pf T", "Pf P", "Pf G", "Pf F"] + + for field_name in field_names2: + table.add_column(field_name, justify="center") + + if anim_args.animation_mode == '2D': + rows2 += [f"{keys.angle_series[frame_idx]:.5g}", f"{keys.zoom_series[frame_idx]:.5g}", + f"{keys.transform_center_x_series[frame_idx]:.5g}", f"{keys.transform_center_y_series[frame_idx]:.5g}"] + + rows2 += [f"{keys.translation_x_series[frame_idx]:.5g}", f"{keys.translation_y_series[frame_idx]:.5g}"] + + if anim_args.animation_mode == '3D': + rows2 += [f"{keys.translation_z_series[frame_idx]:.5g}", f"{keys.rotation_3d_x_series[frame_idx]:.5g}", + f"{keys.rotation_3d_y_series[frame_idx]:.5g}", f"{keys.rotation_3d_z_series[frame_idx]:.5g}"] + if anim_args.aspect_ratio_schedule.replace(" ", "") != '0:(1)': + rows2 += [f"{keys.aspect_ratio_series[frame_idx]:.5g}"] + if anim_args.enable_perspective_flip: + rows2 += [f"{keys.perspective_flip_theta_series[frame_idx]:.5g}", f"{keys.perspective_flip_phi_series[frame_idx]:.5g}", + f"{keys.perspective_flip_gamma_series[frame_idx]:.5g}", f"{keys.perspective_flip_fv_series[frame_idx]:.5g}"] + + table.add_row(*rows1, *rows2) + console.print(table) diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/gradio_funcs.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/gradio_funcs.py new file mode 100644 index 0000000000000000000000000000000000000000..0cf70b52820be004234360ae005b0d6e01a9ab75 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/gradio_funcs.py @@ -0,0 +1,296 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
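#
# A note on the column-building idiom in print_combined_table() above: because
# bool is an int subclass, multiplying a one-element list by a flag adds the
# entry only when the corresponding schedule is enabled, e.g.
#
#     ["Sampler"] * True    ->  ["Sampler"]
#     ["Sampler"] * False   ->  []
#
# which keeps the row values aligned with the columns that were actually added.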
+ +# Contact the authors: https://deforum.github.io/ + +import gradio as gr +import modules.paths as ph +from .general_utils import get_os +from .upscaling import process_ncnn_upscale_vid_upload_logic +from .video_audio_utilities import extract_number, get_quick_vid_info, get_ffmpeg_params +from .frame_interpolation import process_interp_vid_upload_logic, process_interp_pics_upload_logic, gradio_f_interp_get_fps_and_fcount +from .vid2depth import process_depth_vid_upload_logic + +f_models_path = ph.models_path + '/Deforum' + +def handle_change_functions(l_vars): + l_vars['override_settings_with_file'].change(fn=hide_if_false, inputs=l_vars['override_settings_with_file'], outputs=l_vars['custom_settings_file']) + l_vars['sampler'].change(fn=show_when_ddim, inputs=l_vars['sampler'], outputs=l_vars['enable_ddim_eta_scheduling']) + l_vars['sampler'].change(fn=show_when_ancestral_samplers, inputs=l_vars['sampler'], outputs=l_vars['enable_ancestral_eta_scheduling']) + l_vars['enable_ancestral_eta_scheduling'].change(fn=hide_if_false, inputs=l_vars['enable_ancestral_eta_scheduling'], outputs=l_vars['ancestral_eta_schedule']) + l_vars['enable_ddim_eta_scheduling'].change(fn=hide_if_false, inputs=l_vars['enable_ddim_eta_scheduling'], outputs=l_vars['ddim_eta_schedule']) + l_vars['animation_mode'].change(fn=change_max_frames_visibility, inputs=l_vars['animation_mode'], outputs=l_vars['max_frames']) + diffusion_cadence_outputs = [l_vars['diffusion_cadence'], l_vars['guided_images_accord'], l_vars['optical_flow_cadence_row'], l_vars['cadence_flow_factor_schedule'], + l_vars['optical_flow_redo_generation'], l_vars['redo_flow_factor_schedule'], l_vars['diffusion_redo']] + for output in diffusion_cadence_outputs: + l_vars['animation_mode'].change(fn=change_diffusion_cadence_visibility, inputs=l_vars['animation_mode'], outputs=output) + three_d_related_outputs = [l_vars['only_3d_motion_column'], l_vars['depth_warp_row_1'], l_vars['depth_warp_row_2'], l_vars['depth_warp_row_3'], l_vars['depth_warp_row_4'], + l_vars['depth_warp_row_5'], l_vars['depth_warp_row_6'], l_vars['depth_warp_row_7']] + for output in three_d_related_outputs: + l_vars['animation_mode'].change(fn=disble_3d_related_stuff, inputs=l_vars['animation_mode'], outputs=output) + pers_flip_outputs = [l_vars['per_f_th_row'], l_vars['per_f_ph_row'], l_vars['per_f_ga_row'], l_vars['per_f_f_row']] + for output in pers_flip_outputs: + l_vars['enable_perspective_flip'].change(fn=hide_if_false, inputs=l_vars['enable_perspective_flip'], outputs=output) + l_vars['animation_mode'].change(fn=per_flip_handle, inputs=[l_vars['animation_mode'], l_vars['enable_perspective_flip']], outputs=output) + l_vars['animation_mode'].change(fn=only_show_in_non_3d_mode, inputs=l_vars['animation_mode'], outputs=l_vars['depth_warp_msg_html']) + l_vars['animation_mode'].change(fn=enable_2d_related_stuff, inputs=l_vars['animation_mode'], outputs=l_vars['only_2d_motion_column']) + l_vars['animation_mode'].change(fn=disable_by_interpolation, inputs=l_vars['animation_mode'], outputs=l_vars['color_force_grayscale']) + l_vars['animation_mode'].change(fn=disable_by_interpolation, inputs=l_vars['animation_mode'], outputs=l_vars['noise_tab_column']) + l_vars['animation_mode'].change(fn=disable_pers_flip_accord, inputs=l_vars['animation_mode'], outputs=l_vars['enable_per_f_row']) + l_vars['animation_mode'].change(fn=disable_pers_flip_accord, inputs=l_vars['animation_mode'], outputs=l_vars['both_anim_mode_motion_params_column']) + 
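    # The wiring above and below follows one Gradio pattern: a change event on a
    # control is bound to a small helper that returns gr.update(visible=...) for
    # the dependent component. A minimal sketch of that pattern, mirroring the
    # hide_if_false helper defined later in this module (the component names here
    # are made up, not taken from the Deforum UI):
    #
    #     import gradio as gr
    #
    #     def hide_if_false(choice):
    #         return gr.update(visible=True) if choice else gr.update(visible=False)
    #
    #     with gr.Blocks() as demo:
    #         enable_extras = gr.Checkbox(label="Enable extras")
    #         extras_panel = gr.Textbox(label="Extra options", visible=False)
    #         enable_extras.change(fn=hide_if_false, inputs=enable_extras, outputs=extras_panel)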
l_vars['aspect_ratio_use_old_formula'].change(fn=hide_if_true, inputs=l_vars['aspect_ratio_use_old_formula'], outputs=l_vars['aspect_ratio_schedule']) + l_vars['animation_mode'].change(fn=show_hybrid_html_msg, inputs=l_vars['animation_mode'], outputs=l_vars['hybrid_msg_html']) + l_vars['animation_mode'].change(fn=change_hybrid_tab_status, inputs=l_vars['animation_mode'], outputs=l_vars['hybrid_sch_accord']) + l_vars['animation_mode'].change(fn=change_hybrid_tab_status, inputs=l_vars['animation_mode'], outputs=l_vars['hybrid_settings_accord']) + l_vars['animation_mode'].change(fn=change_hybrid_tab_status, inputs=l_vars['animation_mode'], outputs=l_vars['humans_masking_accord']) + l_vars['optical_flow_redo_generation'].change(fn=hide_if_none, inputs=l_vars['optical_flow_redo_generation'], outputs=l_vars['redo_flow_factor_schedule_column']) + l_vars['optical_flow_cadence'].change(fn=hide_if_none, inputs=l_vars['optical_flow_cadence'], outputs=l_vars['cadence_flow_factor_schedule_column']) + l_vars['seed_behavior'].change(fn=change_seed_iter_visibility, inputs=l_vars['seed_behavior'], outputs=l_vars['seed_iter_N_row']) + l_vars['seed_behavior'].change(fn=change_seed_schedule_visibility, inputs=l_vars['seed_behavior'], outputs=l_vars['seed_schedule_row']) + l_vars['color_coherence'].change(fn=change_color_coherence_video_every_N_frames_visibility, inputs=l_vars['color_coherence'], outputs=l_vars['color_coherence_video_every_N_frames_row']) + l_vars['color_coherence'].change(fn=change_color_coherence_image_path_visibility, inputs=l_vars['color_coherence'], outputs=l_vars['color_coherence_image_path_row']) + l_vars['noise_type'].change(fn=change_perlin_visibility, inputs=l_vars['noise_type'], outputs=l_vars['perlin_row']) + l_vars['diffusion_cadence'].change(fn=hide_optical_flow_cadence, inputs=l_vars['diffusion_cadence'], outputs=l_vars['optical_flow_cadence_row']) + l_vars['depth_algorithm'].change(fn=legacy_3d_mode, inputs=l_vars['depth_algorithm'], outputs=l_vars['midas_weight']) + l_vars['depth_algorithm'].change(fn=show_leres_html_msg, inputs=l_vars['depth_algorithm'], outputs=l_vars['leres_license_msg']) + l_vars['fps'].change(fn=change_gif_button_visibility, inputs=l_vars['fps'], outputs=l_vars['make_gif']) + l_vars['r_upscale_model'].change(fn=update_r_upscale_factor, inputs=l_vars['r_upscale_model'], outputs=l_vars['r_upscale_factor']) + l_vars['ncnn_upscale_model'].change(fn=update_r_upscale_factor, inputs=l_vars['ncnn_upscale_model'], outputs=l_vars['ncnn_upscale_factor']) + l_vars['ncnn_upscale_model'].change(update_upscale_out_res_by_model_name, inputs=[l_vars['ncnn_upscale_in_vid_res'], l_vars['ncnn_upscale_model']], + outputs=l_vars['ncnn_upscale_out_vid_res']) + l_vars['ncnn_upscale_factor'].change(update_upscale_out_res, inputs=[l_vars['ncnn_upscale_in_vid_res'], l_vars['ncnn_upscale_factor']], outputs=l_vars['ncnn_upscale_out_vid_res']) + l_vars['vid_to_upscale_chosen_file'].change(vid_upscale_gradio_update_stats, inputs=[l_vars['vid_to_upscale_chosen_file'], l_vars['ncnn_upscale_factor']], + outputs=[l_vars['ncnn_upscale_in_vid_fps_ui_window'], l_vars['ncnn_upscale_in_vid_frame_count_window'], l_vars['ncnn_upscale_in_vid_res'], + l_vars['ncnn_upscale_out_vid_res']]) + l_vars['hybrid_comp_mask_type'].change(fn=hide_if_none, inputs=l_vars['hybrid_comp_mask_type'], outputs=l_vars['hybrid_comp_mask_row']) + hybrid_motion_outputs = [l_vars['hybrid_flow_method'], l_vars['hybrid_flow_factor_schedule'], l_vars['hybrid_flow_consistency'], l_vars['hybrid_consistency_blur'], + 
l_vars['hybrid_motion_use_prev_img']] + for output in hybrid_motion_outputs: + l_vars['hybrid_motion'].change(fn=disable_by_non_optical_flow, inputs=l_vars['hybrid_motion'], outputs=output) + l_vars['hybrid_flow_consistency'].change(fn=hide_if_false, inputs=l_vars['hybrid_flow_consistency'], outputs=l_vars['hybrid_consistency_blur']) + l_vars['hybrid_composite'].change(fn=disable_by_hybrid_composite_dynamic, inputs=[l_vars['hybrid_composite'], l_vars['hybrid_comp_mask_type']], outputs=l_vars['hybrid_comp_mask_row']) + hybrid_composite_outputs = [l_vars['humans_masking_accord'], l_vars['hybrid_sch_accord'], l_vars['hybrid_comp_mask_type'], l_vars['hybrid_use_first_frame_as_init_image'], + l_vars['hybrid_use_init_image']] + for output in hybrid_composite_outputs: + l_vars['hybrid_composite'].change(fn=hide_if_false, inputs=l_vars['hybrid_composite'], outputs=output) + hybrid_comp_mask_type_outputs = [l_vars['hybrid_comp_mask_blend_alpha_schedule_row'], l_vars['hybrid_comp_mask_contrast_schedule_row'], + l_vars['hybrid_comp_mask_auto_contrast_cutoff_high_schedule_row'], + l_vars['hybrid_comp_mask_auto_contrast_cutoff_low_schedule_row']] + for output in hybrid_comp_mask_type_outputs: + l_vars['hybrid_comp_mask_type'].change(fn=hide_if_none, inputs=l_vars['hybrid_comp_mask_type'], outputs=output) + # End of hybrid related + skip_video_creation_outputs = [l_vars['fps_out_format_row'], l_vars['soundtrack_row'], l_vars['store_frames_in_ram'], l_vars['make_gif'], l_vars['r_upscale_row'], + l_vars['delete_imgs'], l_vars['delete_input_frames']] + for output in skip_video_creation_outputs: + l_vars['skip_video_creation'].change(fn=change_visibility_from_skip_video, inputs=l_vars['skip_video_creation'], outputs=output) + l_vars['frame_interpolation_slow_mo_enabled'].change(fn=hide_if_false, inputs=l_vars['frame_interpolation_slow_mo_enabled'], outputs=l_vars['frame_interp_slow_mo_amount_column']) + l_vars['frame_interpolation_engine'].change(fn=change_interp_x_max_limit, inputs=[l_vars['frame_interpolation_engine'], l_vars['frame_interpolation_x_amount']], + outputs=l_vars['frame_interpolation_x_amount']) + # Populate the FPS and FCount values as soon as a video is uploaded to the FileUploadBox (vid_to_interpolate_chosen_file) + l_vars['vid_to_interpolate_chosen_file'].change(gradio_f_interp_get_fps_and_fcount, + inputs=[l_vars['vid_to_interpolate_chosen_file'], l_vars['frame_interpolation_x_amount'], l_vars['frame_interpolation_slow_mo_enabled'], + l_vars['frame_interpolation_slow_mo_amount']], + outputs=[l_vars['in_vid_fps_ui_window'], l_vars['in_vid_frame_count_window'], l_vars['out_interp_vid_estimated_fps']]) + l_vars['vid_to_interpolate_chosen_file'].change(fn=hide_interp_stats, inputs=[l_vars['vid_to_interpolate_chosen_file']], outputs=[l_vars['interp_live_stats_row']]) + interp_hide_list = [l_vars['frame_interpolation_slow_mo_enabled'], l_vars['frame_interpolation_keep_imgs'], l_vars['frame_interpolation_use_upscaled'], l_vars['frame_interp_amounts_row'], l_vars['interp_existing_video_row']] + for output in interp_hide_list: + l_vars['frame_interpolation_engine'].change(fn=hide_interp_by_interp_status, inputs=l_vars['frame_interpolation_engine'], outputs=output) + +# START gradio-to-frame-interoplation/ upscaling functions +def upload_vid_to_interpolate(file, engine, x_am, sl_enabled, sl_am, keep_imgs, in_vid_fps): + # print msg and do nothing if vid not uploaded or interp_x not provided + if not file or engine == 'None': + return print("Please upload a video and set a proper value for 'Interp 
X'. Can't interpolate x0 times :)") + f_location, f_crf, f_preset = get_ffmpeg_params() + + process_interp_vid_upload_logic(file, engine, x_am, sl_enabled, sl_am, keep_imgs, f_location, f_crf, f_preset, in_vid_fps, f_models_path, file.orig_name) + +def upload_pics_to_interpolate(pic_list, engine, x_am, sl_enabled, sl_am, keep_imgs, fps, add_audio, audio_track): + from PIL import Image + + if pic_list is None or len(pic_list) < 2: + return print("Please upload at least 2 pics for interpolation.") + f_location, f_crf, f_preset = get_ffmpeg_params() + # make sure all uploaded pics have the same resolution + pic_sizes = [Image.open(picture_path.name).size for picture_path in pic_list] + if len(set(pic_sizes)) != 1: + return print("All uploaded pics need to be of the same Width and Height / resolution.") + + resolution = pic_sizes[0] + + process_interp_pics_upload_logic(pic_list, engine, x_am, sl_enabled, sl_am, keep_imgs, f_location, f_crf, f_preset, fps, f_models_path, resolution, add_audio, audio_track) + +def ncnn_upload_vid_to_upscale(vid_path, in_vid_fps, in_vid_res, out_vid_res, upscale_model, upscale_factor, keep_imgs): + if vid_path is None: + print("Please upload a video :)") + return + f_location, f_crf, f_preset = get_ffmpeg_params() + current_user = get_os() + process_ncnn_upscale_vid_upload_logic(vid_path, in_vid_fps, in_vid_res, out_vid_res, f_models_path, upscale_model, upscale_factor, keep_imgs, f_location, f_crf, f_preset, current_user) + +def upload_vid_to_depth(vid_to_depth_chosen_file, mode, thresholding, threshold_value, threshold_value_max, adapt_block_size, adapt_c, invert, end_blur, midas_weight_vid2depth, depth_keep_imgs): + # print msg and do nothing if vid not uploaded + if not vid_to_depth_chosen_file: + return print("Please upload a video :()") + f_location, f_crf, f_preset = get_ffmpeg_params() + + process_depth_vid_upload_logic(vid_to_depth_chosen_file, mode, thresholding, threshold_value, threshold_value_max, adapt_block_size, adapt_c, invert, end_blur, midas_weight_vid2depth, + vid_to_depth_chosen_file.orig_name, depth_keep_imgs, f_location, f_crf, f_preset, f_models_path) + +# END gradio-to-frame-interoplation/ upscaling functions + +def change_visibility_from_skip_video(choice): + return gr.update(visible=False) if choice else gr.update(visible=True) + +def update_r_upscale_factor(choice): + return gr.update(value='x4', choices=['x4']) if choice != 'realesr-animevideov3' else gr.update(value='x2', choices=['x2', 'x3', 'x4']) + +def change_perlin_visibility(choice): + return gr.update(visible=choice == "perlin") + +def legacy_3d_mode(choice): + return gr.update(visible=choice.lower() in ["midas+adabins (old)", 'zoe+adabins (old)']) + +def change_color_coherence_image_path_visibility(choice): + return gr.update(visible=choice == "Image") + +def change_color_coherence_video_every_N_frames_visibility(choice): + return gr.update(visible=choice == "Video Input") + +def change_seed_iter_visibility(choice): + return gr.update(visible=choice == "iter") + +def change_seed_schedule_visibility(choice): + return gr.update(visible=choice == "schedule") + +def disable_pers_flip_accord(choice): + return gr.update(visible=True) if choice in ['2D', '3D'] else gr.update(visible=False) + +def per_flip_handle(anim_mode, per_f_enabled): + if anim_mode in ['2D', '3D'] and per_f_enabled: + return gr.update(visible=True) + return gr.update(visible=False) + +def change_max_frames_visibility(choice): + return gr.update(visible=choice != "Video Input") + +def 
change_diffusion_cadence_visibility(choice): + return gr.update(visible=choice not in ['Video Input', 'Interpolation']) + +def disble_3d_related_stuff(choice): + return gr.update(visible=False) if choice != '3D' else gr.update(visible=True) + +def only_show_in_non_3d_mode(choice): + return gr.update(visible=False) if choice == '3D' else gr.update(visible=True) + +def enable_2d_related_stuff(choice): + return gr.update(visible=True) if choice == '2D' else gr.update(visible=False) + +def disable_by_interpolation(choice): + return gr.update(visible=False) if choice in ['Interpolation'] else gr.update(visible=True) + +def disable_by_video_input(choice): + return gr.update(visible=False) if choice in ['Video Input'] else gr.update(visible=True) + +def hide_if_none(choice): + return gr.update(visible=choice != "None") + +def change_gif_button_visibility(choice): + if choice is None or choice == "": + return gr.update(visible=True) + return gr.update(visible=False, value=False) if int(choice) > 30 else gr.update(visible=True) + +def hide_if_false(choice): + return gr.update(visible=True) if choice else gr.update(visible=False) + +def hide_if_true(choice): + return gr.update(visible=False) if choice else gr.update(visible=True) + +def disable_by_hybrid_composite_dynamic(choice, comp_mask_type): + if choice in ['Normal', 'Before Motion', 'After Generation']: + if comp_mask_type != 'None': + return gr.update(visible=True) + return gr.update(visible=False) + +def disable_by_non_optical_flow(choice): + return gr.update(visible=False) if choice != 'Optical Flow' else gr.update(visible=True) + +# Upscaling Gradio UI related funcs +def vid_upscale_gradio_update_stats(vid_path, upscale_factor): + if not vid_path: + return '---', '---', '---', '---' + factor = extract_number(upscale_factor) + fps, fcount, resolution = get_quick_vid_info(vid_path.name) + in_res_str = f"{resolution[0]}*{resolution[1]}" + out_res_str = f"{resolution[0] * factor}*{resolution[1] * factor}" + return fps, fcount, in_res_str, out_res_str + +def update_upscale_out_res(in_res, upscale_factor): + if not in_res: + return '---' + factor = extract_number(upscale_factor) + w, h = [int(x) * factor for x in in_res.split('*')] + return f"{w}*{h}" + +def update_upscale_out_res_by_model_name(in_res, upscale_model_name): + if not upscale_model_name or in_res == '---': + return '---' + factor = 2 if upscale_model_name == 'realesr-animevideov3' else 4 + return f"{int(in_res.split('*')[0]) * factor}*{int(in_res.split('*')[1]) * factor}" + +def hide_optical_flow_cadence(cadence_value): + return gr.update(visible=True) if cadence_value > 1 else gr.update(visible=False) + +def hide_interp_by_interp_status(choice): + return gr.update(visible=False) if choice == 'None' else gr.update(visible=True) + +def change_interp_x_max_limit(engine_name, current_value): + if engine_name == 'FILM': + return gr.update(maximum=300) + elif current_value > 10: + return gr.update(maximum=10, value=2) + return gr.update(maximum=10) + +def hide_interp_stats(choice): + return gr.update(visible=True) if choice is not None else gr.update(visible=False) + +def show_hybrid_html_msg(choice): + return gr.update(visible=True) if choice not in ['2D', '3D'] else gr.update(visible=False) + +def change_hybrid_tab_status(choice): + return gr.update(visible=True) if choice in ['2D', '3D'] else gr.update(visible=False) + +def show_leres_html_msg(choice): + return gr.update(visible=True) if choice.lower() == 'leres' else gr.update(visible=False) + +def show_when_ddim(sampler_name): + 
return gr.update(visible=True) if sampler_name.lower() == 'ddim' else gr.update(visible=False) + +def show_when_ancestral_samplers(sampler_name): + return gr.update(visible=True) if sampler_name.lower() in ['euler a', 'dpm++ 2s a', 'dpm2 a', 'dpm2 a karras', 'dpm++ 2s a karras'] else gr.update(visible=False) + +def change_css(checkbox_status): + if checkbox_status: + display = "block" + else: + display = "none" + + html_template = f''' + + ''' + return html_template diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/human_masking.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/human_masking.py new file mode 100644 index 0000000000000000000000000000000000000000..cac55c753bb449d1188633dda82f042d344c3162 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/human_masking.py @@ -0,0 +1,86 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +# Contact the authors: https://deforum.github.io/ + +import os, cv2 +import torch +from pathlib import Path +from multiprocessing import freeze_support + +def extract_frames(input_video_path, output_imgs_path): + # Open the video file + vidcap = cv2.VideoCapture(input_video_path) + + # Get the total number of frames in the video + frame_count = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT)) + + # Create the output directory if it does not exist + os.makedirs(output_imgs_path, exist_ok=True) + + # Extract the frames + for i in range(frame_count): + success, image = vidcap.read() + if success: + cv2.imwrite(os.path.join(output_imgs_path, f"frame{i}.png"), image) + print(f"{frame_count} frames extracted and saved to {output_imgs_path}") + +def video2humanmasks(input_frames_path, output_folder_path, output_type, fps): + # freeze support is needed for video outputting + freeze_support() + + # check if input path exists and is a directory + if not os.path.exists(input_frames_path) or not os.path.isdir(input_frames_path): + raise ValueError("Invalid input path: {}".format(input_frames_path)) + + # check if output path exists and is a directory + if not os.path.exists(output_folder_path) or not os.path.isdir(output_folder_path): + raise ValueError("Invalid output path: {}".format(output_folder_path)) + + # check if output_type is valid + valid_output_types = ["video", "pngs", "both"] + if output_type.lower() not in valid_output_types: + raise ValueError("Invalid output type: {}. 
Must be one of {}".format(output_type, valid_output_types)) + + # try to predict where torch cache lives, so we can try and fetch models from cache in the next step + predicted_torch_model_cache_path = os.path.join(Path.home(), ".cache", "torch", "hub", "hithereai_RobustVideoMatting_master") + predicted_rvm_cache_testilfe = os.path.join(predicted_torch_model_cache_path, "hubconf.py") + + # try to fetch the models from cache, and only if it can't be find, download from the internet (to enable offline usage) + try: + # Try to fetch the models from cache + convert_video = torch.hub.load(predicted_torch_model_cache_path, "converter", source='local') + model = torch.hub.load(predicted_torch_model_cache_path, "resnet50", source='local').cuda() + except: + # Download from the internet if not found in cache + convert_video = torch.hub.load("hithereai/RobustVideoMatting", "converter") + model = torch.hub.load("hithereai/RobustVideoMatting", "resnet50").cuda() + + output_alpha_vid_path = os.path.join(output_folder_path, "human_masked_video.mp4") + # extract humans masks from the input folder' imgs. + # in this step PNGs will be extracted only if output_type is set to PNGs. Otherwise a video will be made, and in the case of Both, the video will be extracted in the next step to PNGs + convert_video( + model, + input_source=input_frames_path, # full path of the folder that contains all of the extracted input imgs + output_type='video' if output_type.upper() in ("VIDEO", "BOTH") else 'png_sequence', + output_alpha=output_alpha_vid_path if output_type.upper() in ("VIDEO", "BOTH") else output_folder_path, + output_video_mbps=4, + output_video_fps=fps, + downsample_ratio=None, # None for auto + seq_chunk=12, # Process n frames at once for better parallelism + progress=True # show extraction progress + ) + + if output_type.lower() == "both": + extract_frames(output_alpha_vid_path, output_folder_path) diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/hybrid_video.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/hybrid_video.py new file mode 100644 index 0000000000000000000000000000000000000000..03e742a0c8d914043e0c777fd8af303dc4435f90 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/hybrid_video.py @@ -0,0 +1,613 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
+ +# Contact the authors: https://deforum.github.io/ + +import os +import pathlib +import random +import cv2 +import numpy as np +import PIL +from PIL import Image, ImageChops, ImageOps, ImageEnhance +from scipy.ndimage.filters import gaussian_filter +from .consistency_check import make_consistency +from .human_masking import video2humanmasks +from .load_images import load_image +from .video_audio_utilities import vid2frames, get_quick_vid_info, get_frame_name + +def delete_all_imgs_in_folder(folder_path): + files = list(pathlib.Path(folder_path).glob('*.jpg')) + files.extend(list(pathlib.Path(folder_path).glob('*.png'))) + for f in files: os.remove(f) + +def hybrid_generation(args, anim_args, root): + video_in_frame_path = os.path.join(args.outdir, 'inputframes') + hybrid_frame_path = os.path.join(args.outdir, 'hybridframes') + human_masks_path = os.path.join(args.outdir, 'human_masks') + + # create hybridframes folder whether using init_image or inputframes + os.makedirs(hybrid_frame_path, exist_ok=True) + + if anim_args.hybrid_generate_inputframes: + # create folders for the video input frames and optional hybrid frames to live in + os.makedirs(video_in_frame_path, exist_ok=True) + + # delete frames if overwrite = true + if anim_args.overwrite_extracted_frames: + delete_all_imgs_in_folder(hybrid_frame_path) + + # save the video frames from input video + print(f"Video to extract: {anim_args.video_init_path}") + print(f"Extracting video (1 every {anim_args.extract_nth_frame}) frames to {video_in_frame_path}...") + video_fps = vid2frames(video_path=anim_args.video_init_path, video_in_frame_path=video_in_frame_path, n=anim_args.extract_nth_frame, overwrite=anim_args.overwrite_extracted_frames, extract_from_frame=anim_args.extract_from_frame, extract_to_frame=anim_args.extract_to_frame) + + # extract alpha masks of humans from the extracted input video imgs + if anim_args.hybrid_generate_human_masks != "None": + # create a folder for the human masks imgs to live in + print(f"Checking /creating a folder for the human masks") + os.makedirs(human_masks_path, exist_ok=True) + + # delete frames if overwrite = true + if anim_args.overwrite_extracted_frames: + delete_all_imgs_in_folder(human_masks_path) + + # in case that generate_input_frames isn't selected, we won't get the video fps rate as vid2frames isn't called, So we'll check the video fps in here instead + if not anim_args.hybrid_generate_inputframes: + _, video_fps, _ = get_quick_vid_info(anim_args.video_init_path) + + # calculate the correct fps of the masked video according to the original video fps and 'extract_nth_frame' + output_fps = video_fps/anim_args.extract_nth_frame + + # generate the actual alpha masks from the input imgs + print(f"Extracting alpha humans masks from the input frames") + video2humanmasks(video_in_frame_path, human_masks_path, anim_args.hybrid_generate_human_masks, output_fps) + + # get sorted list of inputfiles + inputfiles = sorted(pathlib.Path(video_in_frame_path).glob('*.jpg')) + + if not anim_args.hybrid_use_init_image: + # determine max frames from length of input frames + anim_args.max_frames = len(inputfiles) + if anim_args.max_frames < 1: + raise Exception(f"Error: No input frames found in {video_in_frame_path}! 
Please check your input video path and whether you've opted to extract input frames.") + print(f"Using {anim_args.max_frames} input frames from {video_in_frame_path}...") + + # use first frame as init + if anim_args.hybrid_use_first_frame_as_init_image: + for f in inputfiles: + args.init_image = str(f) + args.init_image_box = None # init_image_box not used in this case + args.use_init = True + print(f"Using init_image from video: {args.init_image}") + break + + return args, anim_args, inputfiles + +def hybrid_composite(args, anim_args, frame_idx, prev_img, depth_model, hybrid_comp_schedules, root): + video_frame = os.path.join(args.outdir, 'inputframes', get_frame_name(anim_args.video_init_path) + f"{frame_idx:09}.jpg") + video_depth_frame = os.path.join(args.outdir, 'hybridframes', get_frame_name(anim_args.video_init_path) + f"_vid_depth{frame_idx:09}.jpg") + depth_frame = os.path.join(args.outdir, f"{root.timestring}_depth_{frame_idx-1:09}.png") + mask_frame = os.path.join(args.outdir, 'hybridframes', get_frame_name(anim_args.video_init_path) + f"_mask{frame_idx:09}.jpg") + comp_frame = os.path.join(args.outdir, 'hybridframes', get_frame_name(anim_args.video_init_path) + f"_comp{frame_idx:09}.jpg") + prev_frame = os.path.join(args.outdir, 'hybridframes', get_frame_name(anim_args.video_init_path) + f"_prev{frame_idx:09}.jpg") + prev_img = cv2.cvtColor(prev_img, cv2.COLOR_BGR2RGB) + prev_img_hybrid = Image.fromarray(prev_img) + if anim_args.hybrid_use_init_image: + video_image = load_image(args.init_image, args.init_image_box) + else: + video_image = Image.open(video_frame) + video_image = video_image.resize((args.W, args.H), PIL.Image.LANCZOS) + hybrid_mask = None + + # composite mask types + if anim_args.hybrid_comp_mask_type == 'Depth': # get depth from last generation + hybrid_mask = Image.open(depth_frame) + elif anim_args.hybrid_comp_mask_type == 'Video Depth': # get video depth + video_depth = depth_model.predict(np.array(video_image), anim_args.midas_weight, root.half_precision) + depth_model.save(video_depth_frame, video_depth) + hybrid_mask = Image.open(video_depth_frame) + elif anim_args.hybrid_comp_mask_type == 'Blend': # create blend mask image + hybrid_mask = Image.blend(ImageOps.grayscale(prev_img_hybrid), ImageOps.grayscale(video_image), hybrid_comp_schedules['mask_blend_alpha']) + elif anim_args.hybrid_comp_mask_type == 'Difference': # create difference mask image + hybrid_mask = ImageChops.difference(ImageOps.grayscale(prev_img_hybrid), ImageOps.grayscale(video_image)) + + # optionally invert mask, if mask type is defined + if anim_args.hybrid_comp_mask_inverse and anim_args.hybrid_comp_mask_type != "None": + hybrid_mask = ImageOps.invert(hybrid_mask) + + # if a mask type is selected, make composition + if hybrid_mask is None: + hybrid_comp = video_image + else: + # ensure grayscale + hybrid_mask = ImageOps.grayscale(hybrid_mask) + # equalization before + if anim_args.hybrid_comp_mask_equalize in ['Before', 'Both']: + hybrid_mask = ImageOps.equalize(hybrid_mask) + # contrast + hybrid_mask = ImageEnhance.Contrast(hybrid_mask).enhance(hybrid_comp_schedules['mask_contrast']) + # auto contrast with cutoffs lo/hi + if anim_args.hybrid_comp_mask_auto_contrast: + hybrid_mask = autocontrast_grayscale(np.array(hybrid_mask), hybrid_comp_schedules['mask_auto_contrast_cutoff_low'], hybrid_comp_schedules['mask_auto_contrast_cutoff_high']) + hybrid_mask = Image.fromarray(hybrid_mask) + hybrid_mask = ImageOps.grayscale(hybrid_mask) + if anim_args.hybrid_comp_save_extra_frames: + 
            hybrid_mask.save(mask_frame)
+        # equalization after
+        if anim_args.hybrid_comp_mask_equalize in ['After', 'Both']:
+            hybrid_mask = ImageOps.equalize(hybrid_mask)
+        # do compositing and save
+        hybrid_comp = Image.composite(prev_img_hybrid, video_image, hybrid_mask)
+        if anim_args.hybrid_comp_save_extra_frames:
+            hybrid_comp.save(comp_frame)
+
+    # final blend of composite with prev_img, or just a blend if no composite is selected
+    hybrid_blend = Image.blend(prev_img_hybrid, hybrid_comp, hybrid_comp_schedules['alpha'])
+    if anim_args.hybrid_comp_save_extra_frames:
+        hybrid_blend.save(prev_frame)
+
+    prev_img = cv2.cvtColor(np.array(hybrid_blend), cv2.COLOR_RGB2BGR)
+
+    # restore to np array and return
+    return args, prev_img
+
+def get_matrix_for_hybrid_motion(frame_idx, dimensions, inputfiles, hybrid_motion):
+    print(f"Calculating {hybrid_motion} RANSAC matrix for frames {frame_idx} to {frame_idx+1}")
+    img1 = cv2.cvtColor(get_resized_image_from_filename(str(inputfiles[frame_idx]), dimensions), cv2.COLOR_BGR2GRAY)
+    img2 = cv2.cvtColor(get_resized_image_from_filename(str(inputfiles[frame_idx+1]), dimensions), cv2.COLOR_BGR2GRAY)
+    M = get_transformation_matrix_from_images(img1, img2, hybrid_motion)
+    return M
+
+def get_matrix_for_hybrid_motion_prev(frame_idx, dimensions, inputfiles, prev_img, hybrid_motion):
+    print(f"Calculating {hybrid_motion} RANSAC matrix for frames {frame_idx} to {frame_idx+1}")
+    # first handle invalid images (wrong size or dtype) by returning default matrix
+    height, width = prev_img.shape[:2]
+    if height == 0 or width == 0 or prev_img.dtype != np.uint8:
+        return get_hybrid_motion_default_matrix(hybrid_motion)
+    else:
+        prev_img_gray = cv2.cvtColor(prev_img, cv2.COLOR_BGR2GRAY)
+        img = cv2.cvtColor(get_resized_image_from_filename(str(inputfiles[frame_idx+1]), dimensions), cv2.COLOR_BGR2GRAY)
+        M = get_transformation_matrix_from_images(prev_img_gray, img, hybrid_motion)
+        return M
+
+def get_flow_for_hybrid_motion(frame_idx, dimensions, inputfiles, hybrid_frame_path, prev_flow, method, raft_model, consistency_check=True, consistency_blur=0, do_flow_visualization=False):
+    print(f"Calculating {method} optical flow {'w/consistency mask' if consistency_check else ''} for frames {frame_idx} to {frame_idx+1}")
+    i1 = get_resized_image_from_filename(str(inputfiles[frame_idx]), dimensions)
+    i2 = get_resized_image_from_filename(str(inputfiles[frame_idx+1]), dimensions)
+    if consistency_check:
+        flow, reliable_flow = get_reliable_flow_from_images(i1, i2, method, raft_model, prev_flow, consistency_blur) # forward flow w/backward consistency check
+        if do_flow_visualization: save_flow_mask_visualization(frame_idx, reliable_flow, hybrid_frame_path)
+    else:
+        flow = get_flow_from_images(i1, i2, method, raft_model, prev_flow) # old single flow forward
+    if do_flow_visualization: save_flow_visualization(frame_idx, dimensions, flow, inputfiles, hybrid_frame_path)
+    return flow
+
+def get_flow_for_hybrid_motion_prev(frame_idx, dimensions, inputfiles, hybrid_frame_path, prev_flow, prev_img, method, raft_model, consistency_check=True, consistency_blur=0, do_flow_visualization=False):
+    print(f"Calculating {method} optical flow {'w/consistency mask' if consistency_check else ''} for frames {frame_idx} to {frame_idx+1}")
+    reliable_flow = None
+    # first handle invalid images by returning default flow
+    height, width = prev_img.shape[:2]
+    if height == 0 or width == 0:
+        flow = get_hybrid_motion_default_flow(dimensions)
+    else:
+        i1 = prev_img.astype(np.uint8)
+        i2 =
get_resized_image_from_filename(str(inputfiles[frame_idx+1]), dimensions) + if consistency_check: + flow, reliable_flow = get_reliable_flow_from_images(i1, i2, method, raft_model, prev_flow, consistency_blur) # forward flow w/backward consistency check + if do_flow_visualization: save_flow_mask_visualization(frame_idx, reliable_flow, hybrid_frame_path) + else: + flow = get_flow_from_images(i1, i2, method, raft_model, prev_flow) + if do_flow_visualization: save_flow_visualization(frame_idx, dimensions, flow, inputfiles, hybrid_frame_path) + return flow + +def get_reliable_flow_from_images(i1, i2, method, raft_model, prev_flow, consistency_blur, reliability=0): + flow_forward = get_flow_from_images(i1, i2, method, raft_model, prev_flow) + flow_backward = get_flow_from_images(i2, i1, method, raft_model, None) + reliable_flow = make_consistency(flow_forward, flow_backward, edges_unreliable=False) + if consistency_blur > 0: + reliable_flow = custom_gaussian_blur(reliable_flow.astype(np.float32), 1, consistency_blur) + return filter_flow(flow_forward, reliable_flow, consistency_blur, reliability), reliable_flow + +def custom_gaussian_blur(input_array, blur_size, sigma): + return gaussian_filter(input_array, sigma=(sigma, sigma, 0), order=0, mode='constant', cval=0.0, truncate=blur_size) + +def filter_flow(flow, reliable_flow, reliability=0.5, consistency_blur=0): + # reliability from reliabile flow: -0.75 is bad, 0 is meh/outside, 1 is great + # Create a mask from the first channel of the reliable_flow array + mask = reliable_flow[..., 0] + + # to set everything to 1 or 0 based on reliability + # mask = np.where(mask >= reliability, 1, 0) + + # Expand the mask to match the shape of the forward_flow array + mask = np.repeat(mask[..., np.newaxis], flow.shape[2], axis=2) + + # Apply the mask to the flow + return flow * mask + +def image_transform_ransac(image_cv2, M, hybrid_motion, depth=None): + if hybrid_motion == "Perspective": + return image_transform_perspective(image_cv2, M, depth) + else: # Affine + return image_transform_affine(image_cv2, M, depth) + +def image_transform_optical_flow(img, flow, flow_factor): + # if flow factor not normal, calculate flow factor + if flow_factor != 1: + flow = flow * flow_factor + # flow is reversed, so you need to reverse it: + flow = -flow + h, w = img.shape[:2] + flow[:, :, 0] += np.arange(w) + flow[:, :, 1] += np.arange(h)[:,np.newaxis] + return remap(img, flow) + +def image_transform_affine(image_cv2, M, depth=None): + if depth is None: + return cv2.warpAffine( + image_cv2, + M, + (image_cv2.shape[1],image_cv2.shape[0]), + borderMode=cv2.BORDER_REFLECT_101 + ) + else: # NEED TO IMPLEMENT THE FOLLOWING FUNCTION + return depth_based_affine_warp( + image_cv2, + depth, + M + ) + +def image_transform_perspective(image_cv2, M, depth=None): + if depth is None: + return cv2.warpPerspective( + image_cv2, + M, + (image_cv2.shape[1], image_cv2.shape[0]), + borderMode=cv2.BORDER_REFLECT_101 + ) + else: # NEED TO IMPLEMENT THE FOLLOWING FUNCTION + return render_3d_perspective( + image_cv2, + depth, + M + ) + +def get_hybrid_motion_default_matrix(hybrid_motion): + if hybrid_motion == "Perspective": + arr = np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]) + else: + arr = np.array([[1., 0., 0.], [0., 1., 0.]]) + return arr + +def get_hybrid_motion_default_flow(dimensions): + cols, rows = dimensions + flow = np.zeros((rows, cols, 2), np.float32) + return flow + +def get_transformation_matrix_from_images(img1, img2, hybrid_motion, confidence=0.75): + # Create SIFT 
detector and feature extractor
+    sift = cv2.SIFT_create()
+
+    # Detect keypoints and compute descriptors
+    kp1, des1 = sift.detectAndCompute(img1, None)
+    kp2, des2 = sift.detectAndCompute(img2, None)
+
+    # Create BFMatcher object and match descriptors
+    bf = cv2.BFMatcher()
+    matches = bf.knnMatch(des1, des2, k=2)
+
+    # Apply ratio test to filter good matches
+    good_matches = []
+    for m, n in matches:
+        if m.distance < confidence * n.distance:
+            good_matches.append(m)
+
+    if len(good_matches) <= 8:
+        return get_hybrid_motion_default_matrix(hybrid_motion)
+
+    # Convert keypoints to numpy arrays
+    src_pts = np.float32([kp1[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
+    dst_pts = np.float32([kp2[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)
+
+    if len(src_pts) <= 8 or len(dst_pts) <= 8:
+        return get_hybrid_motion_default_matrix(hybrid_motion)
+    elif hybrid_motion == "Perspective": # Perspective transformation (3x3)
+        transformation_matrix, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
+        return transformation_matrix
+    else: # Affine - rigid transformation (no skew 3x2)
+        transformation_rigid_matrix, rigid_mask = cv2.estimateAffinePartial2D(src_pts, dst_pts)
+        return transformation_rigid_matrix
+
+def get_flow_from_images(i1, i2, method, raft_model, prev_flow=None):
+    if method == "RAFT":
+        if raft_model is None:
+            raise Exception("RAFT Model not provided to get_flow_from_images function, cannot continue.")
+        return get_flow_from_images_RAFT(i1, i2, raft_model)
+    elif method == "DIS Medium":
+        return get_flow_from_images_DIS(i1, i2, 'medium', prev_flow)
+    elif method == "DIS Fine":
+        return get_flow_from_images_DIS(i1, i2, 'fine', prev_flow)
+    elif method == "DenseRLOF": # Unused - requires running opencv-contrib-python (full opencv) INSTEAD of opencv-python
+        return get_flow_from_images_Dense_RLOF(i1, i2, prev_flow)
+    elif method == "SF": # Unused - requires running opencv-contrib-python (full opencv) INSTEAD of opencv-python
+        return get_flow_from_images_SF(i1, i2, prev_flow)
+    elif method == "DualTVL1": # Unused - requires running opencv-contrib-python (full opencv) INSTEAD of opencv-python
+        return get_flow_from_images_DualTVL1(i1, i2, prev_flow)
+    elif method == "DeepFlow": # Unused - requires running opencv-contrib-python (full opencv) INSTEAD of opencv-python
+        return get_flow_from_images_DeepFlow(i1, i2, prev_flow)
+    elif method == "PCAFlow": # Unused - requires running opencv-contrib-python (full opencv) INSTEAD of opencv-python
+        return get_flow_from_images_PCAFlow(i1, i2, prev_flow)
+    elif method == "Farneback": # Farneback Normal:
+        return get_flow_from_images_Farneback(i1, i2, prev_flow)
+    # if we reached this point, something went wrong.
raise an error: + raise RuntimeError(f"Invald flow method name: '{method}'") + +def get_flow_from_images_RAFT(i1, i2, raft_model): + flow = raft_model.predict(i1, i2) + return flow + +def get_flow_from_images_DIS(i1, i2, preset, prev_flow): + # DIS PRESETS CHART KEY: finest scale, grad desc its, patch size + # DIS_MEDIUM: 1, 25, 8 | DIS_FAST: 2, 16, 8 | DIS_ULTRAFAST: 2, 12, 8 + if preset == 'medium': preset_code = cv2.DISOPTICAL_FLOW_PRESET_MEDIUM + elif preset == 'fast': preset_code = cv2.DISOPTICAL_FLOW_PRESET_FAST + elif preset == 'ultrafast': preset_code = cv2.DISOPTICAL_FLOW_PRESET_ULTRAFAST + elif preset in ['slow','fine']: preset_code = None + i1 = cv2.cvtColor(i1, cv2.COLOR_BGR2GRAY) + i2 = cv2.cvtColor(i2, cv2.COLOR_BGR2GRAY) + dis = cv2.DISOpticalFlow_create(preset_code) + # custom presets + if preset == 'slow': + dis.setGradientDescentIterations(192) + dis.setFinestScale(1) + dis.setPatchSize(8) + dis.setPatchStride(4) + if preset == 'fine': + dis.setGradientDescentIterations(192) + dis.setFinestScale(0) + dis.setPatchSize(8) + dis.setPatchStride(4) + return dis.calc(i1, i2, prev_flow) + +def get_flow_from_images_Dense_RLOF(i1, i2, last_flow=None): + return cv2.optflow.calcOpticalFlowDenseRLOF(i1, i2, flow = last_flow) + +def get_flow_from_images_SF(i1, i2, last_flow=None, layers = 3, averaging_block_size = 2, max_flow = 4): + return cv2.optflow.calcOpticalFlowSF(i1, i2, layers, averaging_block_size, max_flow) + +def get_flow_from_images_DualTVL1(i1, i2, prev_flow): + i1 = cv2.cvtColor(i1, cv2.COLOR_BGR2GRAY) + i2 = cv2.cvtColor(i2, cv2.COLOR_BGR2GRAY) + f = cv2.optflow.DualTVL1OpticalFlow_create() + return f.calc(i1, i2, prev_flow) + +def get_flow_from_images_DeepFlow(i1, i2, prev_flow): + i1 = cv2.cvtColor(i1, cv2.COLOR_BGR2GRAY) + i2 = cv2.cvtColor(i2, cv2.COLOR_BGR2GRAY) + f = cv2.optflow.createOptFlow_DeepFlow() + return f.calc(i1, i2, prev_flow) + +def get_flow_from_images_PCAFlow(i1, i2, prev_flow): + i1 = cv2.cvtColor(i1, cv2.COLOR_BGR2GRAY) + i2 = cv2.cvtColor(i2, cv2.COLOR_BGR2GRAY) + f = cv2.optflow.createOptFlow_PCAFlow() + return f.calc(i1, i2, prev_flow) + +def get_flow_from_images_Farneback(i1, i2, preset="normal", last_flow=None, pyr_scale = 0.5, levels = 3, winsize = 15, iterations = 3, poly_n = 5, poly_sigma = 1.2, flags = 0): + flags = cv2.OPTFLOW_FARNEBACK_GAUSSIAN # Specify the operation flags + pyr_scale = 0.5 # The image scale (<1) to build pyramids for each image + if preset == "fine": + levels = 13 # The number of pyramid layers, including the initial image + winsize = 77 # The averaging window size + iterations = 13 # The number of iterations at each pyramid level + poly_n = 15 # The size of the pixel neighborhood used to find polynomial expansion in each pixel + poly_sigma = 0.8 # The standard deviation of the Gaussian used to smooth derivatives used as a basis for the polynomial expansion + else: # "normal" + levels = 5 # The number of pyramid layers, including the initial image + winsize = 21 # The averaging window size + iterations = 5 # The number of iterations at each pyramid level + poly_n = 7 # The size of the pixel neighborhood used to find polynomial expansion in each pixel + poly_sigma = 1.2 # The standard deviation of the Gaussian used to smooth derivatives used as a basis for the polynomial expansion + i1 = cv2.cvtColor(i1, cv2.COLOR_BGR2GRAY) + i2 = cv2.cvtColor(i2, cv2.COLOR_BGR2GRAY) + flags = 0 # flags = cv2.OPTFLOW_USE_INITIAL_FLOW + flow = cv2.calcOpticalFlowFarneback(i1, i2, last_flow, pyr_scale, levels, winsize, iterations, poly_n, 
poly_sigma, flags) + return flow + +def save_flow_visualization(frame_idx, dimensions, flow, inputfiles, hybrid_frame_path): + flow_img_file = os.path.join(hybrid_frame_path, f"flow{frame_idx:09}.jpg") + flow_img = cv2.imread(str(inputfiles[frame_idx])) + flow_img = cv2.resize(flow_img, (dimensions[0], dimensions[1]), cv2.INTER_AREA) + flow_img = cv2.cvtColor(flow_img, cv2.COLOR_RGB2GRAY) + flow_img = cv2.cvtColor(flow_img, cv2.COLOR_GRAY2BGR) + flow_img = draw_flow_lines_in_grid_in_color(flow_img, flow) + flow_img = cv2.cvtColor(flow_img, cv2.COLOR_BGR2RGB) + cv2.imwrite(flow_img_file, flow_img) + print(f"Saved optical flow visualization: {flow_img_file}") + +def save_flow_mask_visualization(frame_idx, reliable_flow, hybrid_frame_path, color=True): + flow_mask_img_file = os.path.join(hybrid_frame_path, f"flow_mask{frame_idx:09}.jpg") + if color: + # Normalize the reliable_flow array to the range [0, 255] + normalized_reliable_flow = (reliable_flow - reliable_flow.min()) / (reliable_flow.max() - reliable_flow.min()) * 255 + # Change the data type to np.uint8 + mask_image = normalized_reliable_flow.astype(np.uint8) + else: + # Extract the first channel of the reliable_flow array + first_channel = reliable_flow[..., 0] + # Normalize the first channel to the range [0, 255] + normalized_first_channel = (first_channel - first_channel.min()) / (first_channel.max() - first_channel.min()) * 255 + # Change the data type to np.uint8 + grayscale_image = normalized_first_channel.astype(np.uint8) + # Replicate the grayscale channel three times to form a BGR image + mask_image = np.stack((grayscale_image, grayscale_image, grayscale_image), axis=2) + cv2.imwrite(flow_mask_img_file, mask_image) + print(f"Saved mask flow visualization: {flow_mask_img_file}") + +def reliable_flow_to_image(reliable_flow): + # Extract the first channel of the reliable_flow array + first_channel = reliable_flow[..., 0] + # Normalize the first channel to the range [0, 255] + normalized_first_channel = (first_channel - first_channel.min()) / (first_channel.max() - first_channel.min()) * 255 + # Change the data type to np.uint8 + grayscale_image = normalized_first_channel.astype(np.uint8) + # Replicate the grayscale channel three times to form a BGR image + bgr_image = np.stack((grayscale_image, grayscale_image, grayscale_image), axis=2) + return bgr_image + +def draw_flow_lines_in_grid_in_color(img, flow, step=8, magnitude_multiplier=1, min_magnitude = 0, max_magnitude = 10000): + flow = flow * magnitude_multiplier + h, w = img.shape[:2] + y, x = np.mgrid[step/2:h:step, step/2:w:step].reshape(2,-1).astype(int) + fx, fy = flow[y,x].T + lines = np.vstack([x, y, x+fx, y+fy]).T.reshape(-1, 2, 2) + lines = np.int32(lines + 0.5) + vis = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) + vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR) + + mag, ang = cv2.cartToPolar(flow[...,0], flow[...,1]) + hsv = np.zeros((flow.shape[0], flow.shape[1], 3), dtype=np.uint8) + hsv[...,0] = ang*180/np.pi/2 + hsv[...,1] = 255 + hsv[...,2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX) + bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR) + vis = cv2.add(vis, bgr) + + # Iterate through the lines + for (x1, y1), (x2, y2) in lines: + # Calculate the magnitude of the line + magnitude = np.sqrt((x2 - x1)**2 + (y2 - y1)**2) + + # Only draw the line if it falls within the magnitude range + if min_magnitude <= magnitude <= max_magnitude: + b = int(bgr[y1, x1, 0]) + g = int(bgr[y1, x1, 1]) + r = int(bgr[y1, x1, 2]) + color = (b, g, r) + cv2.arrowedLine(vis, (x1, y1), (x2, y2), 
color, thickness=1, tipLength=0.1) + return vis + +def draw_flow_lines_in_color(img, flow, threshold=3, magnitude_multiplier=1, min_magnitude = 0, max_magnitude = 10000): + # h, w = img.shape[:2] + vis = img.copy() # Create a copy of the input image + + # Find the locations in the flow field where the magnitude of the flow is greater than the threshold + mag, ang = cv2.cartToPolar(flow[...,0], flow[...,1]) + idx = np.where(mag > threshold) + + # Create HSV image + hsv = np.zeros((flow.shape[0], flow.shape[1], 3), dtype=np.uint8) + hsv[...,0] = ang*180/np.pi/2 + hsv[...,1] = 255 + hsv[...,2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX) + + # Convert HSV image to BGR + bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR) + + # Add color from bgr + vis = cv2.add(vis, bgr) + + # Draw an arrow at each of these locations to indicate the direction of the flow + for i, (y, x) in enumerate(zip(idx[0], idx[1])): + # Calculate the magnitude of the line + x2 = x + magnitude_multiplier * int(flow[y, x, 0]) + y2 = y + magnitude_multiplier * int(flow[y, x, 1]) + magnitude = np.sqrt((x2 - x)**2 + (y2 - y)**2) + + # Only draw the line if it falls within the magnitude range + if min_magnitude <= magnitude <= max_magnitude: + if i % random.randint(100, 200) == 0: + b = int(bgr[y, x, 0]) + g = int(bgr[y, x, 1]) + r = int(bgr[y, x, 2]) + color = (b, g, r) + cv2.arrowedLine(vis, (x, y), (x2, y2), color, thickness=1, tipLength=0.25) + + return vis + +def autocontrast_grayscale(image, low_cutoff=0, high_cutoff=100): + # Perform autocontrast on a grayscale np array image. + # Find the minimum and maximum values in the image + min_val = np.percentile(image, low_cutoff) + max_val = np.percentile(image, high_cutoff) + + # Scale the image so that the minimum value is 0 and the maximum value is 255 + image = 255 * (image - min_val) / (max_val - min_val) + + # Clip values that fall outside the range [0, 255] + image = np.clip(image, 0, 255) + + return image + +def get_resized_image_from_filename(im, dimensions): + img = cv2.imread(im) + return cv2.resize(img, (dimensions[0], dimensions[1]), cv2.INTER_AREA) + +def remap(img, flow): + border_mode = cv2.BORDER_REFLECT_101 + h, w = img.shape[:2] + displacement = int(h * 0.25), int(w * 0.25) + larger_img = cv2.copyMakeBorder(img, displacement[0], displacement[0], displacement[1], displacement[1], border_mode) + lh, lw = larger_img.shape[:2] + larger_flow = extend_flow(flow, lw, lh) + remapped_img = cv2.remap(larger_img, larger_flow, None, cv2.INTER_LINEAR, border_mode) + output_img = center_crop_image(remapped_img, w, h) + return output_img + +def center_crop_image(img, w, h): + y, x, _ = img.shape + width_indent = int((x - w) / 2) + height_indent = int((y - h) / 2) + cropped_img = img[height_indent:y-height_indent, width_indent:x-width_indent] + return cropped_img + +def extend_flow(flow, w, h): + # Get the shape of the original flow image + flow_h, flow_w = flow.shape[:2] + # Calculate the position of the image in the new image + x_offset = int((w - flow_w) / 2) + y_offset = int((h - flow_h) / 2) + # Generate the X and Y grids + x_grid, y_grid = np.meshgrid(np.arange(w), np.arange(h)) + # Create the new flow image and set it to the X and Y grids + new_flow = np.dstack((x_grid, y_grid)).astype(np.float32) + # Shift the values of the original flow by the size of the border + flow[:,:,0] += x_offset + flow[:,:,1] += y_offset + # Overwrite the middle of the grid with the original flow + new_flow[y_offset:y_offset+flow_h, x_offset:x_offset+flow_w, :] = flow + # Return the extended 
image + return new_flow + +def abs_flow_to_rel_flow(flow, width, height): + fx, fy = flow[:,:,0], flow[:,:,1] + max_flow_x = np.max(np.abs(fx)) + max_flow_y = np.max(np.abs(fy)) + max_flow = max(max_flow_x, max_flow_y) + + rel_fx = fx / (max_flow * width) + rel_fy = fy / (max_flow * height) + return np.dstack((rel_fx, rel_fy)) + +def rel_flow_to_abs_flow(rel_flow, width, height): + rel_fx, rel_fy = rel_flow[:,:,0], rel_flow[:,:,1] + + max_flow_x = np.max(np.abs(rel_fx * width)) + max_flow_y = np.max(np.abs(rel_fy * height)) + max_flow = max(max_flow_x, max_flow_y) + + fx = rel_fx * (max_flow * width) + fy = rel_fy * (max_flow * height) + return np.dstack((fx, fy)) diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/image_sharpening.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/image_sharpening.py new file mode 100644 index 0000000000000000000000000000000000000000..6db965e26d38339d5f66cfd0b6405f0d39e3f22f --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/image_sharpening.py @@ -0,0 +1,38 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +# Contact the authors: https://deforum.github.io/ + +import cv2 +import numpy as np + +def unsharp_mask(img, kernel_size=(5, 5), sigma=1.0, amount=1.0, threshold=0, mask=None): + if amount == 0: + return img + # Return a sharpened version of the image, using an unsharp mask. + # If mask is not None, only areas under mask are handled + blurred = cv2.GaussianBlur(img, kernel_size, sigma) + sharpened = float(amount + 1) * img - float(amount) * blurred + sharpened = np.maximum(sharpened, np.zeros(sharpened.shape)) + sharpened = np.minimum(sharpened, 255 * np.ones(sharpened.shape)) + sharpened = sharpened.round().astype(np.uint8) + if threshold > 0: + low_contrast_mask = np.absolute(img - blurred) < threshold + np.copyto(sharpened, img, where=low_contrast_mask) + if mask is not None: + mask = np.array(mask) + masked_sharpened = cv2.bitwise_and(sharpened, sharpened, mask=mask) + masked_img = cv2.bitwise_and(img, img, mask=255-mask) + sharpened = cv2.add(masked_img, masked_sharpened) + return sharpened \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/load_images.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/load_images.py new file mode 100644 index 0000000000000000000000000000000000000000..67ee68bc75a04fd19aec6583354cdf9794c95621 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/load_images.py @@ -0,0 +1,115 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +# Contact the authors: https://deforum.github.io/ + +import requests +import os +from PIL import Image +import socket +import torchvision.transforms.functional as TF +from .general_utils import clean_gradio_path_strings + +def load_img(path : str, image_box :Image.Image, shape=None, use_alpha_as_mask=False): + # use_alpha_as_mask: Read the alpha channel of the image as the mask image + image = load_image(path, image_box) + image = image.convert('RGBA') if use_alpha_as_mask else image.convert('RGB') + image = image.resize(shape, resample=Image.LANCZOS) if shape is not None else image + + mask_image = None + if use_alpha_as_mask: + # Split alpha channel into a mask_image + red, green, blue, alpha = Image.Image.split(image) # not interested in R G or B, just in the alpha channel + mask_image = alpha.convert('L') + image = image.convert('RGB') + + # check using init image alpha as mask if mask is not blank + extrema = mask_image.getextrema() + if (extrema == (0,0)) or extrema == (255,255): + print("use_alpha_as_mask==True: Using the alpha channel from the init image as a mask, but the alpha channel is blank.") + print("ignoring alpha as mask.") + mask_image = None + + return image, mask_image + +def load_image(image_path :str, image_box :Image.Image): + # If init_image_box was used then no need to fetch the image via URL, just return the Image object directly. + if isinstance(image_box, Image.Image): + return image_box + + image_path = clean_gradio_path_strings(image_path) + image = None + if image_path.startswith('http://') or image_path.startswith('https://'): + try: + host = socket.gethostbyname("www.google.com") + s = socket.create_connection((host, 80), 2) + s.close() + except: + raise ConnectionError("There is no active internet connection available (couldn't connect to google.com as a network test) - please use *local* masks and init files only.") + try: + response = requests.get(image_path, stream=True) + except requests.exceptions.RequestException as e: + raise ConnectionError(f"Failed to download image {image_path} due to no internet connection. Error: {e}") + if response.status_code == 404 or response.status_code != 200: + raise ConnectionError(f"Init image url or mask image url is not valid: {image_path}") + image = Image.open(response.raw).convert('RGB') + else: + image_path = os.path.realpath(image_path) + if not os.path.exists(image_path): + raise RuntimeError(f"Init image path or mask image path is not valid: {image_path}") + image = Image.open(image_path).convert('RGB') + + return image + +def prepare_mask(mask_input, mask_shape, mask_brightness_adjust=1.0, mask_contrast_adjust=1.0): + """ + prepares mask for use in webui + """ + # Aparently 'mask_input' can be both path and Image object. 
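+    # load_image() accepts either a path/URL or a PIL Image, which is why mask_input is passed as
+    # both arguments below; the mask is then resized, optionally brightness/contrast adjusted, and
+    # converted to single-channel 'L' mode.
+    # Illustrative call (hypothetical values): prepare_mask("mask.png", (512, 512), 1.0, 1.0)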
+ mask = load_image(mask_input, mask_input) + mask = mask.resize(mask_shape, resample=Image.LANCZOS) + if mask_brightness_adjust != 1: + mask = TF.adjust_brightness(mask, mask_brightness_adjust) + if mask_contrast_adjust != 1: + mask = TF.adjust_contrast(mask, mask_contrast_adjust) + mask = mask.convert('L') + return mask + +# "check_mask_for_errors" may have prevented errors in composable masks, +# but it CAUSES errors on any frame where it's all black. +# Bypassing the check below until we can fix it even better. +# This may break composable masks, but it makes ACTUAL masks usable. +def check_mask_for_errors(mask_input, invert_mask=False): + extrema = mask_input.getextrema() + if (invert_mask): + if extrema == (255,255): + print("after inverting mask will be blank. ignoring mask") + return None + elif extrema == (0,0): + print("mask is blank. ignoring mask") + return None + else: + return mask_input + +def get_mask(args): + return prepare_mask(args.mask_file, (args.W, args.H), args.mask_contrast_adjust, args.mask_brightness_adjust) + +def get_mask_from_file(mask_file, args): + return prepare_mask(mask_file, (args.W, args.H), args.mask_contrast_adjust, args.mask_brightness_adjust) + +def blank_if_none(mask, w, h, mode): + return Image.new(mode, (w, h), (0)) if mask is None else mask + +def none_if_blank(mask): + return None if mask.getextrema() == (0,0) else mask diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/masks.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/masks.py new file mode 100644 index 0000000000000000000000000000000000000000..af3d8bfe036e4c9b1b1f382a91cda3ac6e83a440 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/masks.py @@ -0,0 +1,56 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
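+# Overview: do_overlay_mask() composites the generated image over the current video/init frame,
+# using either a per-frame video mask or the static mask file, with optional mask inversion.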
+ +# Contact the authors: https://deforum.github.io/ + +import os +import cv2 +import gc +import numpy as np +from PIL import Image, ImageOps +from .video_audio_utilities import get_frame_name +from .load_images import load_image + +def do_overlay_mask(args, anim_args, img, frame_idx, is_bgr_array=False): + if is_bgr_array: + img = cv2.cvtColor(img.astype(np.uint8), cv2.COLOR_BGR2RGB) + img = Image.fromarray(img) + + if anim_args.use_mask_video: + current_mask = Image.open(os.path.join(args.outdir, 'maskframes', get_frame_name(anim_args.video_mask_path) + f"{frame_idx:09}.jpg")) + current_frame = Image.open(os.path.join(args.outdir, 'inputframes', get_frame_name(anim_args.video_init_path) + f"{frame_idx:09}.jpg")) + elif args.use_mask: + current_mask = args.mask_image if args.mask_image is not None else load_image(args.mask_file, None) + if args.init_image is None and args.init_image_box is None: + current_frame = img + else: + current_frame = load_image(args.init_image, args.init_image_box) + + current_mask = current_mask.resize((args.W, args.H), Image.LANCZOS) + current_frame = current_frame.resize((args.W, args.H), Image.LANCZOS) + current_mask = ImageOps.grayscale(current_mask) + + if args.invert_mask: + current_mask = ImageOps.invert(current_mask) + + img = Image.composite(img, current_frame, current_mask) + + if is_bgr_array: + img = np.array(img) + img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR) + + del(current_mask, current_frame) + gc.collect() + + return img \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/noise.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/noise.py new file mode 100644 index 0000000000000000000000000000000000000000..3085d3fe0b9f58e096bf6655e2388a82c25b7fda --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/noise.py @@ -0,0 +1,88 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
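+# Overview: add_noise() blends noise into the input image: plain white (gaussian) noise by
+# default, or Perlin-modulated noise when noise_type == 'perlin', optionally restricted to a mask.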
+ +# Contact the authors: https://deforum.github.io/ + +import torch +from torch.nn.functional import interpolate +import numpy as np +from PIL import ImageOps +import math +from .animation import sample_to_cv2 +import cv2 +from modules.shared import opts + +DEBUG_MODE = opts.data.get("deforum_debug_mode_enabled", False) + +deforum_noise_gen = torch.Generator(device='cpu') + +# 2D Perlin noise in PyTorch https://gist.github.com/vadimkantorov/ac1b097753f217c5c11bc2ff396e0a57 +def rand_perlin_2d(shape, res, fade = lambda t: 6*t**5 - 15*t**4 + 10*t**3): + delta = (res[0] / shape[0], res[1] / shape[1]) + d = (shape[0] // res[0], shape[1] // res[1]) + + grid = torch.stack(torch.meshgrid(torch.arange(0, res[0], delta[0]), torch.arange(0, res[1], delta[1]), indexing='ij'), dim = -1) % 1 + angles = 2*math.pi*torch.rand(res[0]+1, res[1]+1, generator=deforum_noise_gen) + gradients = torch.stack((torch.cos(angles), torch.sin(angles)), dim = -1) + + tile_grads = lambda slice1, slice2: gradients[slice1[0]:slice1[1], slice2[0]:slice2[1]].repeat_interleave(d[0], 0).repeat_interleave(d[1], 1) + dot = lambda grad, shift: (torch.stack((grid[:shape[0],:shape[1],0] + shift[0], grid[:shape[0],:shape[1], 1] + shift[1] ), dim = -1) * grad[:shape[0], :shape[1]]).sum(dim = -1) + + n00 = dot(tile_grads([0, -1], [0, -1]), [0, 0]) + n10 = dot(tile_grads([1, None], [0, -1]), [-1, 0]) + n01 = dot(tile_grads([0, -1],[1, None]), [0, -1]) + n11 = dot(tile_grads([1, None], [1, None]), [-1,-1]) + t = fade(grid[:shape[0], :shape[1]]) + return math.sqrt(2) * torch.lerp(torch.lerp(n00, n10, t[..., 0]), torch.lerp(n01, n11, t[..., 0]), t[..., 1]) + +def rand_perlin_2d_octaves(shape, res, octaves=1, persistence=0.5): + noise = torch.zeros(shape) + frequency = 1 + amplitude = 1 + for _ in range(int(octaves)): + noise += amplitude * rand_perlin_2d(shape, (frequency*res[0], frequency*res[1])) + frequency *= 2 + amplitude *= persistence + return noise + +def condition_noise_mask(noise_mask, invert_mask = False): + if invert_mask: + noise_mask = ImageOps.invert(noise_mask) + noise_mask = np.array(noise_mask.convert("L")) + noise_mask = noise_mask.astype(np.float32) / 255.0 + noise_mask = np.around(noise_mask, decimals=0) + noise_mask = torch.from_numpy(noise_mask) + #noise_mask = torch.round(noise_mask) + return noise_mask + +def add_noise(sample, noise_amt: float, seed: int, noise_type: str, noise_args, noise_mask = None, invert_mask = False): + deforum_noise_gen.manual_seed(seed) # Reproducibility + perlin_w = sample.shape[0] + perlin_h = sample.shape[1] + perlin_w, perlin_h = map(lambda x: x - x % 64, (perlin_w, perlin_h)) # rescale perlin to multiplies of 64 + sample2dshape = (perlin_w, perlin_h) + noise = torch.randn((sample.shape[2], perlin_w, perlin_h), generator=deforum_noise_gen) # White noise + if noise_type == 'perlin': + # rand_perlin_2d_octaves is between -1 and 1, so we need to shift it to be between 0 and 1 + # print(sample.shape) + noise = noise * ((rand_perlin_2d_octaves(sample2dshape, (int(noise_args[0]), int(noise_args[1])), octaves=noise_args[2], persistence=noise_args[3]) + torch.ones(sample2dshape)) / 2) + noise = interpolate(noise.unsqueeze(1), size=(sample.shape[0], sample.shape[1])).squeeze(1) # rescale perlin back to the target resolution + if noise_mask is not None: + noise_mask = condition_noise_mask(noise_mask, invert_mask) + noise_to_add = sample_to_cv2(noise * noise_mask) + else: + noise_to_add = sample_to_cv2(noise) + sample = cv2.addWeighted(sample, 1-noise_amt, noise_to_add, noise_amt, 0) + + return 
sample diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/parseq_adapter.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/parseq_adapter.py new file mode 100644 index 0000000000000000000000000000000000000000..fee9943d9dade83f7a81734f8f2e22971392b7d8 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/parseq_adapter.py @@ -0,0 +1,279 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +# Contact the authors: https://deforum.github.io/ + +import copy +import json +import logging +import operator +from operator import itemgetter +import numpy as np +import pandas as pd +import requests +from .animation_key_frames import DeformAnimKeys, ControlNetKeys, LooperAnimKeys +from .rich import console + +logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO) + +IGNORED_FIELDS = ['fi', 'use_looper', 'imagesToKeyframe', 'schedules'] + +class ParseqAdapter(): + def __init__(self, parseq_args, anim_args, video_args, controlnet_args, loop_args, mute=False): + + # Basic data extraction + self.use_parseq = parseq_args.parseq_manifest and parseq_args.parseq_manifest.strip() + self.use_deltas = parseq_args.parseq_use_deltas + + self.parseq_json = self.load_manifest(parseq_args) if self.use_parseq else json.loads('{ "rendered_frames": [{"frame": 0}] }') + self.rendered_frames = self.parseq_json['rendered_frames'] + self.max_frame = self.get_max('frame') + self.required_frames = anim_args.max_frames + + # Wrap the original schedules with Parseq decorators, so that Parseq values will override the original values IFF appropriate. 
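+        # Each decorator first looks a requested series up in the Parseq rendered_frames and only
+        # falls back to the wrapped Deforum keys object when the field is not defined there
+        # (see ParseqAbstractDecorator.__getattribute__ below).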
+ self.anim_keys = ParseqAnimKeysDecorator(self, DeformAnimKeys(anim_args)) + self.cn_keys = ParseqControlNetKeysDecorator(self, ControlNetKeys(anim_args, controlnet_args)) if controlnet_args else None + # -1 because seed seems to be unused in LooperAnimKeys + self.looper_keys = ParseqLooperKeysDecorator(self, LooperAnimKeys(loop_args, anim_args, -1)) if loop_args else None + + # Validation + if (self.use_parseq): + self.required_fps = video_args.fps + self.config_output_fps = self.parseq_json['options']['output_fps'] + count_defined_frames = len(self.rendered_frames) + expected_defined_frames = self.max_frame+1 # frames are 0-indexed + if (expected_defined_frames != count_defined_frames): + logging.warning(f"There may be duplicated or missing frame data in the Parseq input: expected {expected_defined_frames} frames including frame 0 because the highest frame number is {self.max_frame}, but there are {count_defined_frames} frames defined.") + if not mute: + self.print_parseq_table() + + # Resolve manifest either directly from supplied value or via supplied URL + def load_manifest(self, parseq_args): + manifestOrUrl = parseq_args.parseq_manifest.strip() + if (manifestOrUrl.startswith('http')): + logging.info(f"Loading Parseq manifest from URL: {manifestOrUrl}") + try: + body = requests.get(manifestOrUrl).text + logging.debug(f"Loaded remote manifest: {body}") + parseq_json = json.loads(body) + if not parseq_json or not 'rendered_frames' in parseq_json: + raise Exception(f"The JSON data does not look like a Parseq manifest (missing field 'rendered_frames').") + + # SIDE EFFECT! + # Add the parseq manifest without the detailed frame data to parseq_args. + # This ensures it will be saved in the settings file, so that you can always + # see exactly what parseq prompts and keyframes were used, even if what the URL + # points to changes. 
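+                # Only the manifest summary is kept: the bulky per-frame arrays
+                # ('rendered_frames' and 'rendered_frames_meta') are deleted just below.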
+ parseq_args.fetched_parseq_manifest_summary = copy.deepcopy(parseq_json) + if parseq_args.fetched_parseq_manifest_summary['rendered_frames']: + del parseq_args.fetched_parseq_manifest_summary['rendered_frames'] + if parseq_args.fetched_parseq_manifest_summary['rendered_frames_meta']: + del parseq_args.fetched_parseq_manifest_summary['rendered_frames_meta'] + + return parseq_json + + except Exception as e: + logging.error(f"Unable to load Parseq manifest from URL: {manifestOrUrl}") + raise e + else: + return json.loads(manifestOrUrl) + + def print_parseq_table(self): + from rich.table import Table + from rich import box + + table = Table(padding=0, box=box.ROUNDED, show_lines=True) + table.add_column("", style="white bold") + table.add_column("Parseq", style="cyan") + table.add_column("Deforum", style="green") + + table.add_row("Animation", '\n'.join(self.anim_keys.managed_fields()), '\n'.join(self.anim_keys.unmanaged_fields())) + if self.cn_keys: + table.add_row("ControlNet", '\n'.join(self.cn_keys.managed_fields()), '\n'.join(self.cn_keys.unmanaged_fields())) + if self.looper_keys: + table.add_row("Guided Images", '\n'.join(self.looper_keys.managed_fields()), '\n'.join(self.looper_keys.unmanaged_fields())) + table.add_row("Prompts", "✅" if self.manages_prompts() else "❌", "✅" if not self.manages_prompts() else "❌") + table.add_row("Frames", str(len(self.rendered_frames)), str(self.required_frames) + (" ⚠️" if str(self.required_frames) != str(len(self.rendered_frames))+"" else "")) + table.add_row("FPS", str(self.config_output_fps), str(self.required_fps) + (" ⚠️" if str(self.required_fps) != str(self.config_output_fps) else "")) + + console.print("\nUse this table to validate your Parseq & Deforum setup:") + console.print(table) + + def manages_prompts(self): + return self.use_parseq and 'deforum_prompt' in self.rendered_frames[0].keys() + + def manages_seed(self): + return self.use_parseq and 'seed' in self.rendered_frames[0].keys() + + def get_max(self, seriesName): + return max(self.rendered_frames, key=itemgetter(seriesName))[seriesName] + + +class ParseqAbstractDecorator(): + + def __init__(self, adapter: ParseqAdapter, fallback_keys): + self.adapter = adapter + self.fallback_keys = fallback_keys + + def parseq_to_series(self, seriesName): + + # Check if value is present in first frame of JSON data. If not, assume it's undefined. + # The Parseq contract is that the first frame (at least) must define values for all fields. + try: + if self.adapter.rendered_frames[0][seriesName] is not None: + logging.debug(f"Found {seriesName} in first frame of Parseq data. Assuming it's defined.") + except KeyError: + return None + + key_frame_series = pd.Series([np.nan for a in range(self.adapter.required_frames)]) + + for frame in self.adapter.rendered_frames: + frame_idx = frame['frame'] + if frame_idx < self.adapter.required_frames: + if not np.isnan(key_frame_series[frame_idx]): + logging.warning(f"Duplicate frame definition {frame_idx} detected for data {seriesName}. Latest wins.") + key_frame_series[frame_idx] = frame[seriesName] + + # If the animation will have more frames than Parseq defines, + # duplicate final value to match the required frame count. 
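+            # e.g. (hypothetical numbers) if Parseq defines frames 0..99 but the animation needs
+            # 120 frames, frames 100..119 all reuse the value of the last Parseq-defined frame.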
+ while (frame_idx < self.adapter.required_frames): + key_frame_series[frame_idx] = operator.itemgetter(-1)(self.adapter.rendered_frames)[seriesName] + frame_idx += 1 + + return key_frame_series + + # fallback to anim_args if the series is not defined in the Parseq data + def __getattribute__(inst, name): + try: + definedField = super(ParseqAbstractDecorator, inst).__getattribute__(name) + except AttributeError: + # No field with this name has been explicitly extracted from the JSON data. + # It must be a new parameter. Let's see if it's in the raw JSON. + + parseqName = inst.strip_suffixes(name) + + # returns None if not defined in Parseq JSON data + definedField = inst.parseq_to_series(parseqName) + if (definedField is not None): + # add the field to the instance so we don't compute it again. + setattr(inst, name, definedField) + + if (definedField is not None): + return definedField + else: + logging.debug(f"Data for {name} not defined in Parseq data. Falling back to standard Deforum values.") + return getattr(inst.fallback_keys, name) + + + # parseq doesn't use _series, _schedule or _schedule_series suffixes in the + # JSON data - remove them. + def strip_suffixes(self, name): + strippableSuffixes = ['_series', '_schedule'] + parseqName = name + while any(parseqName.endswith(suffix) for suffix in strippableSuffixes): + for suffix in strippableSuffixes: + if parseqName.endswith(suffix): + parseqName = parseqName[:-len(suffix)] + return parseqName + + # parseq prefixes some field names for clarity. These prefixes are not present in the original Deforum names. + def strip_parseq_prefixes(self, name): + strippablePrefixes = ['guided_'] + parseqName = name + while any(parseqName.startswith(prefix) for prefix in strippablePrefixes): + for prefix in strippablePrefixes: + if parseqName.startswith(prefix): + parseqName = parseqName[len(prefix):] + return parseqName + + def all_parseq_fields(self): + return [self.strip_parseq_prefixes(field) for field in self.adapter.rendered_frames[0].keys() if (not field.endswith('_delta') and not field.endswith('_pc'))] + + def managed_fields(self): + all_parseq_fields = self.all_parseq_fields() + deforum_fields = [self.strip_suffixes(property) for property, _ in vars(self.fallback_keys).items() if property not in IGNORED_FIELDS and not property.startswith('_')] + return [field for field in deforum_fields if field in all_parseq_fields] + + def unmanaged_fields(self): + all_parseq_fields = self.all_parseq_fields() + deforum_fields = [self.strip_suffixes(property) for property, _ in vars(self.fallback_keys).items() if property not in IGNORED_FIELDS and not property.startswith('_')] + return [field for field in deforum_fields if field not in all_parseq_fields] + + +class ParseqControlNetKeysDecorator(ParseqAbstractDecorator): + def __init__(self, adapter: ParseqAdapter, cn_keys): + super().__init__(adapter, cn_keys) + + +class ParseqAnimKeysDecorator(ParseqAbstractDecorator): + def __init__(self, adapter: ParseqAdapter, anim_keys): + super().__init__(adapter, anim_keys) + + # Parseq treats input values as absolute values. So if you want to + # progressively rotate 180 degrees over 4 frames, you specify: 45, 90, 135, 180. + # However, many animation parameters are relative to the previous frame if there is enough + # loopback strength. So if you want to rotate 180 degrees over 5 frames, the animation engine expects: + # 45, 45, 45, 45. Therefore, for such parameter, we use the fact that Parseq supplies delta values. 
+ optional_delta = '_delta' if self.adapter.use_deltas else '' + self.angle_series = super().parseq_to_series('angle' + optional_delta) + self.zoom_series = super().parseq_to_series('zoom' + optional_delta) + self.translation_x_series = super().parseq_to_series('translation_x' + optional_delta) + self.translation_y_series = super().parseq_to_series('translation_y' + optional_delta) + self.translation_z_series = super().parseq_to_series('translation_z' + optional_delta) + self.rotation_3d_x_series = super().parseq_to_series('rotation_3d_x' + optional_delta) + self.rotation_3d_y_series = super().parseq_to_series('rotation_3d_y' + optional_delta) + self.rotation_3d_z_series = super().parseq_to_series('rotation_3d_z' + optional_delta) + self.perspective_flip_theta_series = super().parseq_to_series('perspective_flip_theta' + optional_delta) + self.perspective_flip_phi_series = super().parseq_to_series('perspective_flip_phi' + optional_delta) + self.perspective_flip_gamma_series = super().parseq_to_series('perspective_flip_gamma' + optional_delta) + + # Non-motion animation args - never use deltas for these. + self.perspective_flip_fv_series = super().parseq_to_series('perspective_flip_fv') + self.noise_schedule_series = super().parseq_to_series('noise') + self.strength_schedule_series = super().parseq_to_series('strength') + self.sampler_schedule_series = super().parseq_to_series('sampler_schedule') + self.contrast_schedule_series = super().parseq_to_series('contrast') + self.cfg_scale_schedule_series = super().parseq_to_series('scale') + self.steps_schedule_series = super().parseq_to_series("steps_schedule") + self.seed_schedule_series = super().parseq_to_series('seed') + self.fov_series = super().parseq_to_series('fov') + self.near_series = super().parseq_to_series('near') + self.far_series = super().parseq_to_series('far') + self.subseed_schedule_series = super().parseq_to_series('subseed') + self.subseed_strength_schedule_series = super().parseq_to_series('subseed_strength') + self.kernel_schedule_series = super().parseq_to_series('antiblur_kernel') + self.sigma_schedule_series = super().parseq_to_series('antiblur_sigma') + self.amount_schedule_series = super().parseq_to_series('antiblur_amount') + self.threshold_schedule_series = super().parseq_to_series('antiblur_threshold') + + # TODO - move to a different decorator? + self.prompts = super().parseq_to_series('deforum_prompt') # formatted as "{positive} --neg {negative}" + + +class ParseqLooperKeysDecorator(ParseqAbstractDecorator): + def __init__(self, adapter: ParseqAdapter, looper_keys): + super().__init__(adapter, looper_keys) + + # The Deforum UI offers an "Image strength schedule" in the Guided Images section, + # which simply overrides the strength schedule if guided images is enabled. + # In Parseq, we just re-use the same strength schedule. + self.image_strength_schedule_series = super().parseq_to_series('strength') + + # We explicitly state the mapping for all other guided images fields so we can strip the prefix + # that we use in Parseq. 
+ self.blendFactorMax_series = super().parseq_to_series('guided_blendFactorMax') + self.blendFactorSlope_series = super().parseq_to_series('guided_blendFactorSlope') + self.tweening_frames_schedule_series = super().parseq_to_series('guided_tweening_frames') + self.color_correction_factor_series = super().parseq_to_series('guided_color_correction_factor') + diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/parseq_adapter_test.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/parseq_adapter_test.py new file mode 100644 index 0000000000000000000000000000000000000000..b05f89e98b28448261ffecbf2163753800b066ee --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/parseq_adapter_test.py @@ -0,0 +1,298 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +# Contact the authors: https://deforum.github.io/ + +## +# From /scripts directory, run like: python -m unittest deforum_helpers.parseq_adapter_test +## + +import unittest +from .parseq_adapter import ParseqAdapter +from .animation_key_frames import DeformAnimKeys, LooperAnimKeys, ControlNetKeys +from unittest.mock import patch +from unittest.mock import MagicMock, PropertyMock +from types import SimpleNamespace + +DEFAULT_ARGS = SimpleNamespace(anim_args = SimpleNamespace(max_frames=2), + video_args = SimpleNamespace(fps=30), + args = SimpleNamespace(seed=-1), + controlnet_args = SimpleNamespace(), + loop_args = SimpleNamespace()) + + +def buildParseqAdapter(parseq_use_deltas, parseq_manifest, setup_args=DEFAULT_ARGS): + return ParseqAdapter(SimpleNamespace(parseq_use_deltas=parseq_use_deltas, parseq_manifest=parseq_manifest), + setup_args.anim_args, setup_args.video_args, setup_args.controlnet_args, setup_args.loop_args) + +class TestParseqAnimKeys(unittest.TestCase): + + @patch('deforum_helpers.parseq_adapter.DeformAnimKeys') + @patch('deforum_helpers.parseq_adapter.ControlNetKeys') + @patch('deforum_helpers.parseq_adapter.LooperAnimKeys') + def test_withprompt(self, mock_deformanimkeys, mock_controlnetkeys, mock_looperanimkeys): + parseq_adapter = buildParseqAdapter(parseq_use_deltas=True, parseq_manifest=""" + { + "options": { + "output_fps": 30 + }, + "rendered_frames": [ + { + "frame": 0, + "deforum_prompt": "blah" + }, + { + "frame": 1, + "deforum_prompt": "blah" + } + ] + } + """) + self.assertTrue(parseq_adapter.manages_prompts()) + + + @patch('deforum_helpers.parseq_adapter.DeformAnimKeys') + @patch('deforum_helpers.parseq_adapter.ControlNetKeys') + @patch('deforum_helpers.parseq_adapter.LooperAnimKeys') + def test_withoutprompt(self, mock_deformanimkeys, mock_controlnetkeys, mock_looperanimkeys): + parseq_adapter = buildParseqAdapter(parseq_use_deltas=True, parseq_manifest=""" + { + "options": { + "output_fps": 30 + }, + "rendered_frames": [ + { + "frame": 0 + }, + { + "frame": 1 + } + ] + } + """) + self.assertFalse(parseq_adapter.manages_prompts()) + + @patch('deforum_helpers.parseq_adapter.DeformAnimKeys') + 
@patch('deforum_helpers.parseq_adapter.ControlNetKeys') + @patch('deforum_helpers.parseq_adapter.LooperAnimKeys') + def test_withseed(self, mock_deformanimkeys, mock_controlnetkeys, mock_looperanimkeys): + parseq_adapter = buildParseqAdapter(parseq_use_deltas=True, parseq_manifest=""" + { + "options": { + "output_fps": 30 + }, + "rendered_frames": [ + { + "frame": 0, + "seed": 1 + }, + { + "frame": 1, + "seed": 2 + } + ] + } + """) + self.assertTrue(parseq_adapter.manages_seed()) + + + @patch('deforum_helpers.parseq_adapter.DeformAnimKeys') + @patch('deforum_helpers.parseq_adapter.ControlNetKeys') + @patch('deforum_helpers.parseq_adapter.LooperAnimKeys') + def test_withoutseed(self, mock_deformanimkeys, mock_controlnetkeys, mock_looperanimkeys): + parseq_adapter = buildParseqAdapter(parseq_use_deltas=True, parseq_manifest=""" + { + "options": { + "output_fps": 30 + }, + "rendered_frames": [ + { + "frame": 0 + }, + { + "frame": 1 + } + ] + } + """) + self.assertFalse(parseq_adapter.manages_seed()) + + + @patch('deforum_helpers.parseq_adapter.DeformAnimKeys') + @patch('deforum_helpers.parseq_adapter.ControlNetKeys') + @patch('deforum_helpers.parseq_adapter.LooperAnimKeys') + def test_usedelta(self, mock_deformanimkeys, mock_controlnetkeys, mock_looperanimkeys): + parseq_adapter = buildParseqAdapter(parseq_use_deltas=True, parseq_manifest=""" + { + "options": { + "output_fps": 30 + }, + "rendered_frames": [ + { + "frame": 0, + "angle": 90, + "angle_delta": 90 + }, + { + "frame": 1, + "angle": 180, + "angle_delta": 90 + } + ] + } + """) + self.assertEqual(parseq_adapter.anim_keys.angle_series[1], 90) + + @patch('deforum_helpers.parseq_adapter.DeformAnimKeys') + @patch('deforum_helpers.parseq_adapter.ControlNetKeys') + @patch('deforum_helpers.parseq_adapter.LooperAnimKeys') + def test_usenondelta(self, mock_deformanimkeys, mock_controlnetkeys, mock_looperanimkeys): + parseq_adapter = buildParseqAdapter(parseq_use_deltas=False, parseq_manifest=""" + { + "options": { + "output_fps": 30 + }, + "rendered_frames": [ + { + "frame": 0, + "angle": 90, + "angle_delta": 90 + }, + { + "frame": 1, + "angle": 180, + "angle_delta": 90 + } + ] + } + """) + self.assertEqual(parseq_adapter.anim_keys.angle_series[1], 180) + + @patch('deforum_helpers.parseq_adapter.DeformAnimKeys') + @patch('deforum_helpers.parseq_adapter.ControlNetKeys') + @patch('deforum_helpers.parseq_adapter.LooperAnimKeys') + def test_fallbackonundefined(self, mock_deformanimkeys, mock_controlnetkeys, mock_looperanimkeys): + parseq_adapter = buildParseqAdapter(parseq_use_deltas=False, parseq_manifest=""" + { + "options": { + "output_fps": 30 + }, + "rendered_frames": [ + { + "frame": 0 + }, + { + "frame": 1 + } + ] + } + """) + #TODO - this is a hacky check to make sure we're falling back to the mock. + #There must be a better way to inject an expected value via patch and check for that... 
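+ # (Because DeformAnimKeys is patched with a MagicMock, any series missing from the Parseq data falls back to an attribute of the mocked keys object, whose string form contains 'MagicMock'.)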
+ self.assertRegex(str(parseq_adapter.anim_keys.angle_series[0]), r'MagicMock') + + @patch('deforum_helpers.parseq_adapter.DeformAnimKeys') + @patch('deforum_helpers.parseq_adapter.ControlNetKeys') + @patch('deforum_helpers.parseq_adapter.LooperAnimKeys') + def test_cn(self, mock_deformanimkeys, mock_controlnetkeys, mock_looperanimkeys): + parseq_adapter = buildParseqAdapter(parseq_use_deltas=False, parseq_manifest=""" + { + "options": { + "output_fps": 30 + }, + "rendered_frames": [ + { + "frame": 0, + "cn_1_weight": 1 + }, + { + "frame": 1, + "cn_1_weight": 1 + } + ] + } + """) + self.assertEqual(parseq_adapter.cn_keys.cn_1_weight_schedule_series[0], 1) + + @patch('deforum_helpers.parseq_adapter.DeformAnimKeys') + @patch('deforum_helpers.parseq_adapter.ControlNetKeys') + @patch('deforum_helpers.parseq_adapter.LooperAnimKeys') + def test_cn_fallback(self, mock_deformanimkeys, mock_controlnetkeys, mock_looperanimkeys): + parseq_adapter = buildParseqAdapter(parseq_use_deltas=False, parseq_manifest=""" + { + "options": { + "output_fps": 30 + }, + "rendered_frames": [ + { + "frame": 0 + }, + { + "frame": 1 + } + ] + } + """) + #TODO - this is a hacky check to make sure we're falling back to the mock. + #There must be a better way to inject an expected value via patch and check for that... + self.assertRegex(str(parseq_adapter.cn_keys.cn_1_weight_schedule_series[0]), r'MagicMock') + + @patch('deforum_helpers.parseq_adapter.DeformAnimKeys') + @patch('deforum_helpers.parseq_adapter.LooperAnimKeys') + @patch('deforum_helpers.parseq_adapter.ControlNetKeys') + def test_looper(self, mock_deformanimkeys, mock_looperanimkeys, mock_controlnetkeys): + parseq_adapter = buildParseqAdapter(parseq_use_deltas=False, parseq_manifest=""" + { + "options": { + "output_fps": 30 + }, + "rendered_frames": [ + { + "frame": 0, + "guided_blendFactorMax": 0.4 + }, + { + "frame": 1, + "guided_blendFactorMax": 0.4 + } + ] + } + """) + self.assertEqual(parseq_adapter.looper_keys.blendFactorMax_series[0], 0.4) + + @patch('deforum_helpers.parseq_adapter.DeformAnimKeys') + @patch('deforum_helpers.parseq_adapter.LooperAnimKeys') + @patch('deforum_helpers.parseq_adapter.ControlNetKeys') + def test_looper_fallback(self, mock_deformanimkeys, mock_looperanimkeys, mock_controlnetkeys): + parseq_adapter = buildParseqAdapter(parseq_use_deltas=False, parseq_manifest=""" + { + "options": { + "output_fps": 30 + }, + "rendered_frames": [ + { + "frame": 0 + }, + { + "frame": 1 + } + ] + } + """) + #TODO - this is a hacky check to make sure we're falling back to the mock. + #There must be a better way to inject an expected value via patch and check for that... + self.assertRegex(str(parseq_adapter.looper_keys.blendFactorMax_series[0]), r'MagicMock') + +if __name__ == '__main__': + unittest.main() \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/prompt.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/prompt.py new file mode 100644 index 0000000000000000000000000000000000000000..c317ebaae007d613ce70460a73e1bf16c9d841cc --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/prompt.py @@ -0,0 +1,156 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see <https://www.gnu.org/licenses/>. + +# Contact the authors: https://deforum.github.io/ + +import re +import numexpr + +def check_is_number(value): + float_pattern = r'^(?=.)([+-]?([0-9]*)(\.([0-9]+))?)$' + return re.match(float_pattern, value) + +def parse_weight(match, frame=0, max_frames=0) -> float: + w_raw = match.group("weight") + max_f = max_frames # this line has to be left intact as it's in use by numexpr even though it looks like it doesn't + if w_raw is None: + return 1 + if check_is_number(w_raw): + return float(w_raw) + else: + t = frame + if len(w_raw) < 3: + print('the value inside the backtick characters cannot represent a math function') + return 1 + return float(numexpr.evaluate(w_raw[1:-1])) + +def split_weighted_subprompts(text, frame=0, max_frames=0): + """ + splits the prompt based on deforum webui implementation, moved from generate.py + """ + math_parser = re.compile("(?P<weight>(`[\S\s]*?`))", re.VERBOSE) + + parsed_prompt = re.sub(math_parser, lambda m: str(parse_weight(m, frame)), text) + + negative_prompts = [] + positive_prompts = [] + + prompt_split = parsed_prompt.split("--neg") + if len(prompt_split) > 1: + positive_prompts, negative_prompts = parsed_prompt.split("--neg") # TODO: add --neg to vanilla Deforum for compat + else: + positive_prompts = prompt_split[0] + negative_prompts = "" + + return positive_prompts, negative_prompts + +def interpolate_prompts(animation_prompts, max_frames): + import numpy as np + import pandas as pd + # Get prompts sorted by keyframe + max_f = max_frames + parsed_animation_prompts = {} + for key, value in animation_prompts.items(): + if check_is_number(key): # default case 0:(1 + t %5), 30:(5-t%2) + parsed_animation_prompts[key] = value + else: # math on the left hand side case 0:(1 + t %5), maxKeyframes/2:(5-t%2) + parsed_animation_prompts[int(numexpr.evaluate(key))] = value + + sorted_prompts = sorted(parsed_animation_prompts.items(), key=lambda item: int(item[0])) + + # Setup container for interpolated prompts + prompt_series = pd.Series([np.nan for a in range(max_frames)]) + + # For every keyframe prompt except the last + for i in range(0, len(sorted_prompts) - 1): + # Get current and next keyframe + current_frame = int(sorted_prompts[i][0]) + next_frame = int(sorted_prompts[i + 1][0]) + + # Ensure there are no weird ordering issues or duplication in the animation prompts + # (unlikely because we sort above, and the json parser will strip dupes) + if current_frame >= next_frame: + print(f"WARNING: Sequential prompt keyframes {i}:{current_frame} and {i + 1}:{next_frame} are not monotonically increasing; skipping interpolation.") + continue + + # Get current and next keyframes' positive and negative prompts (if any) + current_prompt = sorted_prompts[i][1] + next_prompt = sorted_prompts[i + 1][1] + current_positive, current_negative, *_ = current_prompt.split("--neg") + [None] + next_positive, next_negative, *_ = next_prompt.split("--neg") + [None] + # Calculate how much to shift the weight from current to next prompt at each frame + weight_step = 1 / (next_frame - current_frame) + + # Apply weighted prompt interpolation for each frame between current and next keyframe + # using the syntax: prompt1 :weight1 AND prompt1
:weight2 --neg nprompt1 :weight1 AND nprompt1 :weight2 + # (See: https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#composable-diffusion ) + for f in range(current_frame, next_frame): + next_weight = weight_step * (f - current_frame) + current_weight = 1 - next_weight + + # We will build the prompt incrementally depending on which prompts are present + prompt_series[f] = '' + + # Cater for the case where neither, either or both current & next have positive prompts: + if current_positive: + prompt_series[f] += f" ({current_positive}):{current_weight}" + if current_positive and next_positive: + prompt_series[f] += f" AND " + if next_positive: + prompt_series[f] += f" ({next_positive}):{next_weight}" + + # Cater for the case where neither, either or both current & next have negative prompts: + if len(current_negative) > 1 or len(next_negative) > 1: + prompt_series[f] += " --neg " + if len(current_negative) > 1: + prompt_series[f] += f" ({current_negative}):{current_weight}" + if len(current_negative) > 1 and len(next_negative) > 1: + prompt_series[f] += f" AND " + if len(next_negative) > 1: + prompt_series[f] += f" ({next_negative}):{next_weight}" + + # Set explicitly declared keyframe prompts (overwriting interpolated values at the keyframe idx). This ensures: + # - That final prompt is set, and + # - Gives us a chance to emit warnings if any keyframe prompts are already using composable diffusion + for i, prompt in parsed_animation_prompts.items(): + prompt_series[int(i)] = prompt + if ' AND ' in prompt: + print(f"WARNING: keyframe {i}'s prompt is using composable diffusion (aka the 'AND' keyword). This will cause unexpected behaviour with interpolation.") + + # Return the filled series, in case max_frames is greater than the last keyframe or any ranges were skipped. + return prompt_series.ffill().bfill() + +def prepare_prompt(prompt_series, max_frames, seed, frame_idx): + max_f = max_frames - 1 + pattern = r'`.*?`' + regex = re.compile(pattern) + prompt_parsed = prompt_series + for match in regex.finditer(prompt_parsed): + matched_string = match.group(0) + parsed_string = matched_string.replace('t', f'{frame_idx}').replace("max_f", f"{max_f}").replace('`', '') + parsed_value = numexpr.evaluate(parsed_string) + prompt_parsed = prompt_parsed.replace(matched_string, str(parsed_value)) + + prompt_to_print, *after_neg = prompt_parsed.strip().split("--neg") + prompt_to_print = prompt_to_print.strip() + after_neg = "".join(after_neg).strip() + + print(f"\033[32mSeed: \033[0m{seed}") + print(f"\033[35mPrompt: \033[0m{prompt_to_print}") + if after_neg and after_neg.strip(): + print(f"\033[91mNeg Prompt: \033[0m{after_neg}") + prompt_to_print += f"--neg {after_neg}" + + # set value back into the prompt + return prompt_to_print diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/render.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/render.py new file mode 100644 index 0000000000000000000000000000000000000000..70e57ba8a567f799d067056461086c25350f7941 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/render.py @@ -0,0 +1,648 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +# Contact the authors: https://deforum.github.io/ + +import os +import pandas as pd +import cv2 +import numpy as np +import numexpr +import gc +import random +import PIL +import time +from PIL import Image, ImageOps +from .generate import generate, isJson +from .noise import add_noise +from .animation import anim_frame_warp +from .animation_key_frames import DeformAnimKeys, LooperAnimKeys +from .video_audio_utilities import get_frame_name, get_next_frame, render_preview +from .depth import DepthModel +from .colors import maintain_colors +from .parseq_adapter import ParseqAdapter +from .seed import next_seed +from .image_sharpening import unsharp_mask +from .load_images import get_mask, load_img, load_image, get_mask_from_file +from .hybrid_video import ( + hybrid_generation, hybrid_composite, get_matrix_for_hybrid_motion, get_matrix_for_hybrid_motion_prev, get_flow_for_hybrid_motion, get_flow_for_hybrid_motion_prev, image_transform_ransac, + image_transform_optical_flow, get_flow_from_images, abs_flow_to_rel_flow, rel_flow_to_abs_flow) +from .save_images import save_image +from .composable_masks import compose_mask_with_check +from .settings import save_settings_from_animation_run +from .deforum_controlnet import unpack_controlnet_vids, is_controlnet_enabled +from .subtitle_handler import init_srt_file, write_frame_subtitle, format_animation_params +from .resume import get_resume_vars +from .masks import do_overlay_mask +from .prompt import prepare_prompt +from modules.shared import opts, cmd_opts, state, sd_model +from modules import lowvram, devices, sd_hijack +from .RAFT import RAFT + +from deforum_api import JobStatusTracker + +def render_animation(args, anim_args, video_args, parseq_args, loop_args, controlnet_args, root): + if opts.data.get("deforum_save_gen_info_as_srt", False): # create .srt file and set timeframe mechanism using FPS + srt_filename = os.path.join(args.outdir, f"{root.timestring}.srt") + srt_frame_duration = init_srt_file(srt_filename, video_args.fps) + + if anim_args.animation_mode in ['2D', '3D']: + # handle hybrid video generation + if anim_args.hybrid_composite != 'None' or anim_args.hybrid_motion in ['Affine', 'Perspective', 'Optical Flow']: + args, anim_args, inputfiles = hybrid_generation(args, anim_args, root) + # path required by hybrid functions, even if hybrid_comp_save_extra_frames is False + hybrid_frame_path = os.path.join(args.outdir, 'hybridframes') + # initialize prev_flow + if anim_args.hybrid_motion == 'Optical Flow': + prev_flow = None + + if loop_args.use_looper: + print("Using Guided Images mode: seed_behavior will be set to 'schedule' and 'strength_0_no_init' to False") + if args.strength == 0: + raise RuntimeError("Strength needs to be greater than 0 in Init tab") + args.strength_0_no_init = False + args.seed_behavior = "schedule" + if not isJson(loop_args.init_images): + raise RuntimeError("The images set for use with keyframe-guidance are not in a proper JSON format") + + # handle controlnet video input frames generation + if is_controlnet_enabled(controlnet_args): + unpack_controlnet_vids(args, anim_args, controlnet_args) + + # initialise Parseq adapter + parseq_adapter = 
ParseqAdapter(parseq_args, anim_args, video_args, controlnet_args, loop_args) + + # expand key frame strings to values + keys = DeformAnimKeys(anim_args, args.seed) if not parseq_adapter.use_parseq else parseq_adapter.anim_keys + loopSchedulesAndData = LooperAnimKeys(loop_args, anim_args, args.seed) if not parseq_adapter.use_parseq else parseq_adapter.looper_keys + + # create output folder for the batch + os.makedirs(args.outdir, exist_ok=True) + print(f"Saving animation frames to:\n{args.outdir}") + + # save settings.txt file for the current run + save_settings_from_animation_run(args, anim_args, parseq_args, loop_args, controlnet_args, video_args, root) + + # resume from timestring + if anim_args.resume_from_timestring: + root.timestring = anim_args.resume_timestring + + # Always enable pseudo-3d with parseq. No need for an extra toggle: + # Whether it's used or not in practice is defined by the schedules + if parseq_adapter.use_parseq: + anim_args.flip_2d_perspective = True + + # expand prompts out to per-frame + if parseq_adapter.manages_prompts(): + prompt_series = keys.prompts + else: + prompt_series = pd.Series([np.nan for a in range(anim_args.max_frames)]) + for i, prompt in root.animation_prompts.items(): + if str(i).isdigit(): + prompt_series[int(i)] = prompt + else: + prompt_series[int(numexpr.evaluate(i))] = prompt + prompt_series = prompt_series.ffill().bfill() + + # check for video inits + using_vid_init = anim_args.animation_mode == 'Video Input' + + # load depth model for 3D + predict_depths = (anim_args.animation_mode == '3D' and anim_args.use_depth_warping) or anim_args.save_depth_maps + predict_depths = predict_depths or (anim_args.hybrid_composite and anim_args.hybrid_comp_mask_type in ['Depth', 'Video Depth']) + predict_depths = predict_depths and not args.motion_preview_mode + if predict_depths: + keep_in_vram = opts.data.get("deforum_keep_3d_models_in_vram") + + device = ('cpu' if cmd_opts.lowvram or cmd_opts.medvram else root.device) + depth_model = DepthModel(root.models_path, device, root.half_precision, keep_in_vram=keep_in_vram, depth_algorithm=anim_args.depth_algorithm, Width=args.W, Height=args.H, + midas_weight=anim_args.midas_weight) + + # depth-based hybrid composite mask requires saved depth maps + if anim_args.hybrid_composite != 'None' and anim_args.hybrid_comp_mask_type == 'Depth': + anim_args.save_depth_maps = True + else: + depth_model = None + anim_args.save_depth_maps = False + + raft_model = None + load_raft = (anim_args.optical_flow_cadence == "RAFT" and int(anim_args.diffusion_cadence) > 1) or \ + (anim_args.hybrid_motion == "Optical Flow" and anim_args.hybrid_flow_method == "RAFT") or \ + (anim_args.optical_flow_redo_generation == "RAFT") + load_raft = load_raft and not args.motion_preview_mode + if load_raft: + print("Loading RAFT model...") + raft_model = RAFT() + + # state for interpolating between diffusion steps + turbo_steps = 1 if using_vid_init else int(anim_args.diffusion_cadence) + turbo_prev_image, turbo_prev_frame_idx = None, 0 + turbo_next_image, turbo_next_frame_idx = None, 0 + + # initialize vars + prev_img = None + color_match_sample = None + start_frame = 0 + + # resume animation (requires at least two frames - see function) + if anim_args.resume_from_timestring: + # determine last frame and frame to start on + prev_frame, next_frame, prev_img, next_img = get_resume_vars( + folder=args.outdir, + timestring=anim_args.resume_timestring, + cadence=turbo_steps + ) + + # set up turbo step vars + if turbo_steps > 1: + 
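# seed the cadence buffers from the two resumed frames so in-betweening can continue where it left off +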
turbo_prev_image, turbo_prev_frame_idx = prev_img, prev_frame + turbo_next_image, turbo_next_frame_idx = next_img, next_frame + + # advance start_frame to next frame + start_frame = next_frame + 1 + + frame_idx = start_frame + + # reset the mask vals as they are overwritten in the compose_mask algorithm + mask_vals = {} + noise_mask_vals = {} + + mask_vals['everywhere'] = Image.new('1', (args.W, args.H), 1) + noise_mask_vals['everywhere'] = Image.new('1', (args.W, args.H), 1) + + mask_image = None + + if args.use_init and ((args.init_image != None and args.init_image != '') or args.init_image_box != None): + _, mask_image = load_img(args.init_image, + args.init_image_box, + shape=(args.W, args.H), + use_alpha_as_mask=args.use_alpha_as_mask) + mask_vals['video_mask'] = mask_image + noise_mask_vals['video_mask'] = mask_image + + # Grab the first frame masks since they wont be provided until next frame + # Video mask overrides the init image mask, also, won't be searching for init_mask if use_mask_video is set + # Made to solve https://github.com/deforum-art/deforum-for-automatic1111-webui/issues/386 + if anim_args.use_mask_video: + + args.mask_file = get_mask_from_file(get_next_frame(args.outdir, anim_args.video_mask_path, frame_idx, True), args) + root.noise_mask = get_mask_from_file(get_next_frame(args.outdir, anim_args.video_mask_path, frame_idx, True), args) + + mask_vals['video_mask'] = get_mask_from_file(get_next_frame(args.outdir, anim_args.video_mask_path, frame_idx, True), args) + noise_mask_vals['video_mask'] = get_mask_from_file(get_next_frame(args.outdir, anim_args.video_mask_path, frame_idx, True), args) + elif mask_image is None and args.use_mask: + mask_vals['video_mask'] = get_mask(args) + noise_mask_vals['video_mask'] = get_mask(args) # TODO?: add a different default noisc mask + + # get color match for 'Image' color coherence only once, before loop + if anim_args.color_coherence == 'Image': + color_match_sample = load_image(anim_args.color_coherence_image_path, None) + color_match_sample = color_match_sample.resize((args.W, args.H), PIL.Image.LANCZOS) + color_match_sample = cv2.cvtColor(np.array(color_match_sample), cv2.COLOR_RGB2BGR) + + # Webui + state.job_count = anim_args.max_frames + last_preview_frame = 0 + + while frame_idx < anim_args.max_frames: + # Webui + + state.job = f"frame {frame_idx + 1}/{anim_args.max_frames}" + state.job_no = frame_idx + 1 + + if state.skipped: + print("\n** PAUSED **") + state.skipped = False + while not state.skipped: + time.sleep(0.1) + print("** RESUMING **") + + print(f"\033[36mAnimation frame: \033[0m{frame_idx}/{anim_args.max_frames} ") + + noise = keys.noise_schedule_series[frame_idx] + strength = keys.strength_schedule_series[frame_idx] + scale = keys.cfg_scale_schedule_series[frame_idx] + contrast = keys.contrast_schedule_series[frame_idx] + kernel = int(keys.kernel_schedule_series[frame_idx]) + sigma = keys.sigma_schedule_series[frame_idx] + amount = keys.amount_schedule_series[frame_idx] + threshold = keys.threshold_schedule_series[frame_idx] + cadence_flow_factor = keys.cadence_flow_factor_schedule_series[frame_idx] + redo_flow_factor = keys.redo_flow_factor_schedule_series[frame_idx] + hybrid_comp_schedules = { + "alpha": keys.hybrid_comp_alpha_schedule_series[frame_idx], + "mask_blend_alpha": keys.hybrid_comp_mask_blend_alpha_schedule_series[frame_idx], + "mask_contrast": keys.hybrid_comp_mask_contrast_schedule_series[frame_idx], + "mask_auto_contrast_cutoff_low": 
int(keys.hybrid_comp_mask_auto_contrast_cutoff_low_schedule_series[frame_idx]), + "mask_auto_contrast_cutoff_high": int(keys.hybrid_comp_mask_auto_contrast_cutoff_high_schedule_series[frame_idx]), + "flow_factor": keys.hybrid_flow_factor_schedule_series[frame_idx] + } + scheduled_sampler_name = None + scheduled_clipskip = None + scheduled_noise_multiplier = None + scheduled_ddim_eta = None + scheduled_ancestral_eta = None + + mask_seq = None + noise_mask_seq = None + if anim_args.enable_steps_scheduling and keys.steps_schedule_series[frame_idx] is not None: + args.steps = int(keys.steps_schedule_series[frame_idx]) + if anim_args.enable_sampler_scheduling and keys.sampler_schedule_series[frame_idx] is not None: + scheduled_sampler_name = keys.sampler_schedule_series[frame_idx].casefold() + if anim_args.enable_clipskip_scheduling and keys.clipskip_schedule_series[frame_idx] is not None: + scheduled_clipskip = int(keys.clipskip_schedule_series[frame_idx]) + if anim_args.enable_noise_multiplier_scheduling and keys.noise_multiplier_schedule_series[frame_idx] is not None: + scheduled_noise_multiplier = float(keys.noise_multiplier_schedule_series[frame_idx]) + if anim_args.enable_ddim_eta_scheduling and keys.ddim_eta_schedule_series[frame_idx] is not None: + scheduled_ddim_eta = float(keys.ddim_eta_schedule_series[frame_idx]) + if anim_args.enable_ancestral_eta_scheduling and keys.ancestral_eta_schedule_series[frame_idx] is not None: + scheduled_ancestral_eta = float(keys.ancestral_eta_schedule_series[frame_idx]) + if args.use_mask and keys.mask_schedule_series[frame_idx] is not None: + mask_seq = keys.mask_schedule_series[frame_idx] + if anim_args.use_noise_mask and keys.noise_mask_schedule_series[frame_idx] is not None: + noise_mask_seq = keys.noise_mask_schedule_series[frame_idx] + + if args.use_mask and not anim_args.use_noise_mask: + noise_mask_seq = mask_seq + + depth = None + + if anim_args.animation_mode == '3D' and (cmd_opts.lowvram or cmd_opts.medvram): + # Unload the main checkpoint and load the depth model + lowvram.send_everything_to_cpu() + sd_hijack.model_hijack.undo_hijack(sd_model) + devices.torch_gc() + if predict_depths: depth_model.to(root.device) + + if turbo_steps == 1 and opts.data.get("deforum_save_gen_info_as_srt"): + params_to_print = opts.data.get("deforum_save_gen_info_as_srt_params", ['Seed']) + params_string = format_animation_params(keys, prompt_series, frame_idx, params_to_print) + write_frame_subtitle(srt_filename, frame_idx, srt_frame_duration, f"F#: {frame_idx}; Cadence: false; Seed: {args.seed}; {params_string}") + params_string = None + + # emit in-between frames + if turbo_steps > 1: + tween_frame_start_idx = max(start_frame, frame_idx - turbo_steps) + cadence_flow = None + for tween_frame_idx in range(tween_frame_start_idx, frame_idx): + # update progress during cadence + state.job = f"frame {tween_frame_idx + 1}/{anim_args.max_frames}" + state.job_no = tween_frame_idx + 1 + # cadence vars + tween = float(tween_frame_idx - tween_frame_start_idx + 1) / float(frame_idx - tween_frame_start_idx) + advance_prev = turbo_prev_image is not None and tween_frame_idx > turbo_prev_frame_idx + advance_next = tween_frame_idx > turbo_next_frame_idx + + # optical flow cadence setup before animation warping + if anim_args.animation_mode in ['2D', '3D'] and anim_args.optical_flow_cadence != 'None': + if keys.strength_schedule_series[tween_frame_start_idx] > 0: + if cadence_flow is None and turbo_prev_image is not None and turbo_next_image is not None: + cadence_flow = 
get_flow_from_images(turbo_prev_image, turbo_next_image, anim_args.optical_flow_cadence, raft_model) / 2 + turbo_next_image = image_transform_optical_flow(turbo_next_image, -cadence_flow, 1) + + if opts.data.get("deforum_save_gen_info_as_srt"): + params_to_print = opts.data.get("deforum_save_gen_info_as_srt_params", ['Seed']) + params_string = format_animation_params(keys, prompt_series, tween_frame_idx, params_to_print) + write_frame_subtitle(srt_filename, tween_frame_idx, srt_frame_duration, f"F#: {tween_frame_idx}; Cadence: {tween < 1.0}; Seed: {args.seed}; {params_string}") + params_string = None + + print(f"Creating in-between {'' if cadence_flow is None else anim_args.optical_flow_cadence + ' optical flow '}cadence frame: {tween_frame_idx}; tween:{tween:0.2f};") + + if depth_model is not None: + assert (turbo_next_image is not None) + depth = depth_model.predict(turbo_next_image, anim_args.midas_weight, root.half_precision) + + if advance_prev: + turbo_prev_image, _ = anim_frame_warp(turbo_prev_image, args, anim_args, keys, tween_frame_idx, depth_model, depth=depth, device=root.device, half_precision=root.half_precision) + if advance_next: + turbo_next_image, _ = anim_frame_warp(turbo_next_image, args, anim_args, keys, tween_frame_idx, depth_model, depth=depth, device=root.device, half_precision=root.half_precision) + + # hybrid video motion - warps turbo_prev_image or turbo_next_image to match motion + if tween_frame_idx > 0: + if anim_args.hybrid_motion in ['Affine', 'Perspective']: + if anim_args.hybrid_motion_use_prev_img: + matrix = get_matrix_for_hybrid_motion_prev(tween_frame_idx - 1, (args.W, args.H), inputfiles, prev_img, anim_args.hybrid_motion) + if advance_prev: + turbo_prev_image = image_transform_ransac(turbo_prev_image, matrix, anim_args.hybrid_motion) + if advance_next: + turbo_next_image = image_transform_ransac(turbo_next_image, matrix, anim_args.hybrid_motion) + else: + matrix = get_matrix_for_hybrid_motion(tween_frame_idx - 1, (args.W, args.H), inputfiles, anim_args.hybrid_motion) + if advance_prev: + turbo_prev_image = image_transform_ransac(turbo_prev_image, matrix, anim_args.hybrid_motion) + if advance_next: + turbo_next_image = image_transform_ransac(turbo_next_image, matrix, anim_args.hybrid_motion) + if anim_args.hybrid_motion in ['Optical Flow']: + if anim_args.hybrid_motion_use_prev_img: + flow = get_flow_for_hybrid_motion_prev(tween_frame_idx - 1, (args.W, args.H), inputfiles, hybrid_frame_path, prev_flow, prev_img, anim_args.hybrid_flow_method, raft_model, + anim_args.hybrid_flow_consistency, anim_args.hybrid_consistency_blur, anim_args.hybrid_comp_save_extra_frames) + if advance_prev: + turbo_prev_image = image_transform_optical_flow(turbo_prev_image, flow, hybrid_comp_schedules['flow_factor']) + if advance_next: + turbo_next_image = image_transform_optical_flow(turbo_next_image, flow, hybrid_comp_schedules['flow_factor']) + prev_flow = flow + else: + flow = get_flow_for_hybrid_motion(tween_frame_idx - 1, (args.W, args.H), inputfiles, hybrid_frame_path, prev_flow, anim_args.hybrid_flow_method, raft_model, + anim_args.hybrid_flow_consistency, anim_args.hybrid_consistency_blur, anim_args.hybrid_comp_save_extra_frames) + if advance_prev: + turbo_prev_image = image_transform_optical_flow(turbo_prev_image, flow, hybrid_comp_schedules['flow_factor']) + if advance_next: + turbo_next_image = image_transform_optical_flow(turbo_next_image, flow, hybrid_comp_schedules['flow_factor']) + prev_flow = flow + + # do optical flow cadence after animation warping + if 
cadence_flow is not None: + cadence_flow = abs_flow_to_rel_flow(cadence_flow, args.W, args.H) + cadence_flow, _ = anim_frame_warp(cadence_flow, args, anim_args, keys, tween_frame_idx, depth_model, depth=depth, device=root.device, half_precision=root.half_precision) + cadence_flow_inc = rel_flow_to_abs_flow(cadence_flow, args.W, args.H) * tween + if advance_prev: + turbo_prev_image = image_transform_optical_flow(turbo_prev_image, cadence_flow_inc, cadence_flow_factor) + if advance_next: + turbo_next_image = image_transform_optical_flow(turbo_next_image, cadence_flow_inc, cadence_flow_factor) + + turbo_prev_frame_idx = turbo_next_frame_idx = tween_frame_idx + + if turbo_prev_image is not None and tween < 1.0: + img = turbo_prev_image * (1.0 - tween) + turbo_next_image * tween + else: + img = turbo_next_image + + # intercept and override to grayscale + if anim_args.color_force_grayscale: + img = cv2.cvtColor(img.astype(np.uint8), cv2.COLOR_BGR2GRAY) + img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) + + # overlay mask + if args.overlay_mask and (anim_args.use_mask_video or args.use_mask): + img = do_overlay_mask(args, anim_args, img, tween_frame_idx, True) + + # get prev_img during cadence + prev_img = img + + # current image update for cadence frames (left commented because it doesn't currently update the preview) + # state.current_image = Image.fromarray(cv2.cvtColor(img.astype(np.uint8), cv2.COLOR_BGR2RGB)) + + # saving cadence frames + filename = f"{root.timestring}_{tween_frame_idx:09}.png" + cv2.imwrite(os.path.join(args.outdir, filename), img) + if anim_args.save_depth_maps: + depth_model.save(os.path.join(args.outdir, f"{root.timestring}_depth_{tween_frame_idx:09}.png"), depth) + + # get color match for video outside of prev_img conditional + hybrid_available = anim_args.hybrid_composite != 'None' or anim_args.hybrid_motion in ['Optical Flow', 'Affine', 'Perspective'] + if anim_args.color_coherence == 'Video Input' and hybrid_available: + if int(frame_idx) % int(anim_args.color_coherence_video_every_N_frames) == 0: + prev_vid_img = Image.open(os.path.join(args.outdir, 'inputframes', get_frame_name(anim_args.video_init_path) + f"{frame_idx:09}.jpg")) + prev_vid_img = prev_vid_img.resize((args.W, args.H), PIL.Image.LANCZOS) + color_match_sample = np.asarray(prev_vid_img) + color_match_sample = cv2.cvtColor(color_match_sample, cv2.COLOR_RGB2BGR) + + # after 1st frame, prev_img exists + if prev_img is not None: + # apply transforms to previous frame + prev_img, depth = anim_frame_warp(prev_img, args, anim_args, keys, frame_idx, depth_model, depth=None, device=root.device, half_precision=root.half_precision) + + # do hybrid compositing before motion + if anim_args.hybrid_composite == 'Before Motion': + args, prev_img = hybrid_composite(args, anim_args, frame_idx, prev_img, depth_model, hybrid_comp_schedules, root) + + # hybrid video motion - warps prev_img to match motion, usually to prepare for compositing + if anim_args.hybrid_motion in ['Affine', 'Perspective']: + if anim_args.hybrid_motion_use_prev_img: + matrix = get_matrix_for_hybrid_motion_prev(frame_idx - 1, (args.W, args.H), inputfiles, prev_img, anim_args.hybrid_motion) + else: + matrix = get_matrix_for_hybrid_motion(frame_idx - 1, (args.W, args.H), inputfiles, anim_args.hybrid_motion) + prev_img = image_transform_ransac(prev_img, matrix, anim_args.hybrid_motion) + if anim_args.hybrid_motion in ['Optical Flow']: + if anim_args.hybrid_motion_use_prev_img: + flow = get_flow_for_hybrid_motion_prev(frame_idx - 1, (args.W, args.H), 
inputfiles, hybrid_frame_path, prev_flow, prev_img, anim_args.hybrid_flow_method, raft_model, + anim_args.hybrid_flow_consistency, anim_args.hybrid_consistency_blur, anim_args.hybrid_comp_save_extra_frames) + else: + flow = get_flow_for_hybrid_motion(frame_idx - 1, (args.W, args.H), inputfiles, hybrid_frame_path, prev_flow, anim_args.hybrid_flow_method, raft_model, + anim_args.hybrid_flow_consistency, anim_args.hybrid_consistency_blur, anim_args.hybrid_comp_save_extra_frames) + prev_img = image_transform_optical_flow(prev_img, flow, hybrid_comp_schedules['flow_factor']) + prev_flow = flow + + # do hybrid compositing after motion (normal) + if anim_args.hybrid_composite == 'Normal': + args, prev_img = hybrid_composite(args, anim_args, frame_idx, prev_img, depth_model, hybrid_comp_schedules, root) + + # apply color matching + if anim_args.color_coherence != 'None': + if color_match_sample is None: + color_match_sample = prev_img.copy() + else: + prev_img = maintain_colors(prev_img, color_match_sample, anim_args.color_coherence) + + # intercept and override to grayscale + if anim_args.color_force_grayscale: + prev_img = cv2.cvtColor(prev_img, cv2.COLOR_BGR2GRAY) + prev_img = cv2.cvtColor(prev_img, cv2.COLOR_GRAY2BGR) + + # apply scaling + contrast_image = (prev_img * contrast).round().astype(np.uint8) + # anti-blur + if amount > 0: + contrast_image = unsharp_mask(contrast_image, (kernel, kernel), sigma, amount, threshold, mask_image if args.use_mask else None) + # apply frame noising + if args.use_mask or anim_args.use_noise_mask: + root.noise_mask = compose_mask_with_check(root, args, noise_mask_seq, noise_mask_vals, Image.fromarray(cv2.cvtColor(contrast_image, cv2.COLOR_BGR2RGB))) + noised_image = add_noise(contrast_image, noise, args.seed, anim_args.noise_type, + (anim_args.perlin_w, anim_args.perlin_h, anim_args.perlin_octaves, anim_args.perlin_persistence), + root.noise_mask, args.invert_mask) + + # use transformed previous frame as init for current + args.use_init = True + root.init_sample = Image.fromarray(cv2.cvtColor(noised_image, cv2.COLOR_BGR2RGB)) + args.strength = max(0.0, min(1.0, strength)) + + args.scale = scale + + # Pix2Pix Image CFG Scale - does *nothing* with non pix2pix checkpoints + args.pix2pix_img_cfg_scale = float(keys.pix2pix_img_cfg_scale_series[frame_idx]) + + # grab prompt for current frame + args.prompt = prompt_series[frame_idx] + + if args.seed_behavior == 'schedule' or parseq_adapter.manages_seed(): + args.seed = int(keys.seed_schedule_series[frame_idx]) + + if anim_args.enable_checkpoint_scheduling: + args.checkpoint = keys.checkpoint_schedule_series[frame_idx] + else: + args.checkpoint = None + + # SubSeed scheduling + if anim_args.enable_subseed_scheduling: + root.subseed = int(keys.subseed_schedule_series[frame_idx]) + root.subseed_strength = float(keys.subseed_strength_schedule_series[frame_idx]) + + if parseq_adapter.manages_seed(): + anim_args.enable_subseed_scheduling = True + root.subseed = int(keys.subseed_schedule_series[frame_idx]) + root.subseed_strength = keys.subseed_strength_schedule_series[frame_idx] + + # set value back into the prompt - prepare and report prompt and seed + args.prompt = prepare_prompt(args.prompt, anim_args.max_frames, args.seed, frame_idx) + + # grab init image for current frame + if using_vid_init: + init_frame = get_next_frame(args.outdir, anim_args.video_init_path, frame_idx, False) + print(f"Using video init frame {init_frame}") + args.init_image = init_frame + args.init_image_box = None # init_image_box not used in 
this case + args.strength = max(0.0, min(1.0, strength)) + if anim_args.use_mask_video: + args.mask_file = get_mask_from_file(get_next_frame(args.outdir, anim_args.video_mask_path, frame_idx, True), args) + root.noise_mask = get_mask_from_file(get_next_frame(args.outdir, anim_args.video_mask_path, frame_idx, True), args) + + mask_vals['video_mask'] = get_mask_from_file(get_next_frame(args.outdir, anim_args.video_mask_path, frame_idx, True), args) + + if args.use_mask: + args.mask_image = compose_mask_with_check(root, args, mask_seq, mask_vals, root.init_sample) if root.init_sample is not None else None # we need it only after the first frame anyway + + # setting up some arguments for the looper + loop_args.imageStrength = loopSchedulesAndData.image_strength_schedule_series[frame_idx] + loop_args.blendFactorMax = loopSchedulesAndData.blendFactorMax_series[frame_idx] + loop_args.blendFactorSlope = loopSchedulesAndData.blendFactorSlope_series[frame_idx] + loop_args.tweeningFrameSchedule = loopSchedulesAndData.tweening_frames_schedule_series[frame_idx] + loop_args.colorCorrectionFactor = loopSchedulesAndData.color_correction_factor_series[frame_idx] + loop_args.use_looper = loopSchedulesAndData.use_looper + loop_args.imagesToKeyframe = loopSchedulesAndData.imagesToKeyframe + + if 'img2img_fix_steps' in opts.data and opts.data["img2img_fix_steps"]: # disable "with img2img do exactly x steps" from general setting, as it *ruins* deforum animations + opts.data["img2img_fix_steps"] = False + if scheduled_clipskip is not None: + opts.data["CLIP_stop_at_last_layers"] = scheduled_clipskip + if scheduled_noise_multiplier is not None: + opts.data["initial_noise_multiplier"] = scheduled_noise_multiplier + if scheduled_ddim_eta is not None: + opts.data["eta_ddim"] = scheduled_ddim_eta + if scheduled_ancestral_eta is not None: + opts.data["eta_ancestral"] = scheduled_ancestral_eta + + if anim_args.animation_mode == '3D' and (cmd_opts.lowvram or cmd_opts.medvram): + if predict_depths: depth_model.to('cpu') + devices.torch_gc() + lowvram.setup_for_low_vram(sd_model, cmd_opts.medvram) + sd_hijack.model_hijack.hijack(sd_model) + + optical_flow_redo_generation = anim_args.optical_flow_redo_generation if not args.motion_preview_mode else 'None' + + # optical flow redo before generation + if optical_flow_redo_generation != 'None' and prev_img is not None and strength > 0: + print(f"Optical flow redo is diffusing and warping using {optical_flow_redo_generation} optical flow before generation.") + stored_seed = args.seed + args.seed = random.randint(0, 2 ** 32 - 1) + disposable_image = generate(args, keys, anim_args, loop_args, controlnet_args, root, parseq_adapter, frame_idx, sampler_name=scheduled_sampler_name) + disposable_image = cv2.cvtColor(np.array(disposable_image), cv2.COLOR_RGB2BGR) + disposable_flow = get_flow_from_images(prev_img, disposable_image, optical_flow_redo_generation, raft_model) + disposable_image = cv2.cvtColor(disposable_image, cv2.COLOR_BGR2RGB) + disposable_image = image_transform_optical_flow(disposable_image, disposable_flow, redo_flow_factor) + args.seed = stored_seed + root.init_sample = Image.fromarray(disposable_image) + del (disposable_image, disposable_flow, stored_seed) + gc.collect() + + # diffusion redo + if int(anim_args.diffusion_redo) > 0 and prev_img is not None and strength > 0 and not args.motion_preview_mode: + stored_seed = args.seed + for n in range(0, int(anim_args.diffusion_redo)): + print(f"Redo generation {n + 1} of {int(anim_args.diffusion_redo)} before final 
generation") + args.seed = random.randint(0, 2 ** 32 - 1) + disposable_image = generate(args, keys, anim_args, loop_args, controlnet_args, root, parseq_adapter, frame_idx, sampler_name=scheduled_sampler_name) + disposable_image = cv2.cvtColor(np.array(disposable_image), cv2.COLOR_RGB2BGR) + # color match on last one only + if n == int(anim_args.diffusion_redo): + disposable_image = maintain_colors(prev_img, color_match_sample, anim_args.color_coherence) + args.seed = stored_seed + root.init_sample = Image.fromarray(cv2.cvtColor(disposable_image, cv2.COLOR_BGR2RGB)) + del (disposable_image, stored_seed) + gc.collect() + + # generation + image = generate(args, keys, anim_args, loop_args, controlnet_args, root, parseq_adapter, frame_idx, sampler_name=scheduled_sampler_name) + + if image is None: + break + + # do hybrid video after generation + if frame_idx > 0 and anim_args.hybrid_composite == 'After Generation': + image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR) + args, image = hybrid_composite(args, anim_args, frame_idx, image, depth_model, hybrid_comp_schedules, root) + image = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB)) + + # color matching on first frame is after generation, color match was collected earlier, so we do an extra generation to avoid the corruption introduced by the color match of first output + if frame_idx == 0 and (anim_args.color_coherence == 'Image' or (anim_args.color_coherence == 'Video Input' and hybrid_available)): + image = maintain_colors(cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR), color_match_sample, anim_args.color_coherence) + image = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB)) + elif color_match_sample is not None and anim_args.color_coherence != 'None' and not anim_args.legacy_colormatch: + image = maintain_colors(cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR), color_match_sample, anim_args.color_coherence) + image = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB)) + + # intercept and override to grayscale + if anim_args.color_force_grayscale: + image = ImageOps.grayscale(image) + image = ImageOps.colorize(image, black="black", white="white") + + # overlay mask + if args.overlay_mask and (anim_args.use_mask_video or args.use_mask): + image = do_overlay_mask(args, anim_args, image, frame_idx) + + # on strength 0, set color match to generation + if ((not anim_args.legacy_colormatch and not args.use_init) or (anim_args.legacy_colormatch and strength == 0)) and not anim_args.color_coherence in ['Image', 'Video Input']: + color_match_sample = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR) + + opencv_image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR) + if not using_vid_init: + prev_img = opencv_image + + if turbo_steps > 1: + turbo_prev_image, turbo_prev_frame_idx = turbo_next_image, turbo_next_frame_idx + turbo_next_image, turbo_next_frame_idx = opencv_image, frame_idx + frame_idx += turbo_steps + else: + filename = f"{root.timestring}_{frame_idx:09}.png" + save_image(image, 'PIL', filename, args, video_args, root) + + if anim_args.save_depth_maps: + if cmd_opts.lowvram or cmd_opts.medvram: + lowvram.send_everything_to_cpu() + sd_hijack.model_hijack.undo_hijack(sd_model) + devices.torch_gc() + depth_model.to(root.device) + depth = depth_model.predict(opencv_image, anim_args.midas_weight, root.half_precision) + depth_model.save(os.path.join(args.outdir, f"{root.timestring}_depth_{frame_idx:09}.png"), depth) + if cmd_opts.lowvram or cmd_opts.medvram: + depth_model.to('cpu') + devices.torch_gc() + 
lowvram.setup_for_low_vram(sd_model, cmd_opts.medvram) + sd_hijack.model_hijack.hijack(sd_model) + frame_idx += 1 + + state.assign_current_image(image) + + args.seed = next_seed(args, root) + + last_preview_frame = render_preview(args, anim_args, video_args, root, frame_idx, last_preview_frame) + + JobStatusTracker().update_phase(root.job_id, phase="GENERATING", progress=frame_idx/anim_args.max_frames) + + + if predict_depths and not keep_in_vram: + depth_model.delete_model() # handles adabins too + + if load_raft: + raft_model.delete_model() + diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/render_modes.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/render_modes.py new file mode 100644 index 0000000000000000000000000000000000000000..6daa3198c2a5f9abbdb5384ada556dc0f2c6eddb --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/render_modes.py @@ -0,0 +1,179 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +# Contact the authors: https://deforum.github.io/ + +import os +import time +import pathlib +import re +import numexpr +from modules.shared import opts, state +from .render import render_animation +from .seed import next_seed +from .video_audio_utilities import vid2frames, render_preview +from .prompt import interpolate_prompts +from .generate import generate +from .animation_key_frames import DeformAnimKeys +from .parseq_adapter import ParseqAdapter +from .save_images import save_image +from .settings import save_settings_from_animation_run + +def render_input_video(args, anim_args, video_args, parseq_args, loop_args, controlnet_args, root): + # create a folder for the video input frames to live in + video_in_frame_path = os.path.join(args.outdir, 'inputframes') + os.makedirs(video_in_frame_path, exist_ok=True) + + # save the video frames from input video + print(f"Exporting Video Frames (1 every {anim_args.extract_nth_frame}) frames to {video_in_frame_path}...") + vid2frames(video_path = anim_args.video_init_path, video_in_frame_path=video_in_frame_path, n=anim_args.extract_nth_frame, overwrite=anim_args.overwrite_extracted_frames, extract_from_frame=anim_args.extract_from_frame, extract_to_frame=anim_args.extract_to_frame) + + # determine max frames from length of input frames + anim_args.max_frames = len([f for f in pathlib.Path(video_in_frame_path).glob('*.jpg')]) + args.use_init = True + print(f"Loading {anim_args.max_frames} input frames from {video_in_frame_path} and saving video frames to {args.outdir}") + + if anim_args.use_mask_video: + # create a folder for the mask video input frames to live in + mask_in_frame_path = os.path.join(args.outdir, 'maskframes') + os.makedirs(mask_in_frame_path, exist_ok=True) + + # save the video frames from mask video + print(f"Exporting Video Frames (1 every {anim_args.extract_nth_frame}) frames to {mask_in_frame_path}...") + vid2frames(video_path=anim_args.video_mask_path,video_in_frame_path=mask_in_frame_path, 
n=anim_args.extract_nth_frame, overwrite=anim_args.overwrite_extracted_frames, extract_from_frame=anim_args.extract_from_frame, extract_to_frame=anim_args.extract_to_frame)
+        max_mask_frames = len([f for f in pathlib.Path(mask_in_frame_path).glob('*.jpg')])
+
+        # limit max frames if there are fewer frames in the video mask than in the input video
+        if max_mask_frames < anim_args.max_frames:
+            anim_args.max_frames = max_mask_frames
+            print("Video mask contains fewer frames than the init video, max frames limited to the number of mask frames.")
+        args.use_mask = True
+        args.overlay_mask = True
+
+    render_animation(args, anim_args, video_args, parseq_args, loop_args, controlnet_args, root)
+
+# A modified copy of the above that allows using a masking video without an init video.
+def render_animation_with_video_mask(args, anim_args, video_args, parseq_args, loop_args, controlnet_args, root):
+    # create a folder for the video mask frames to live in
+    mask_in_frame_path = os.path.join(args.outdir, 'maskframes')
+    os.makedirs(mask_in_frame_path, exist_ok=True)
+
+    # save the video frames from mask video
+    print(f"Exporting Video Frames (1 every {anim_args.extract_nth_frame}) frames to {mask_in_frame_path}...")
+    vid2frames(video_path=anim_args.video_mask_path, video_in_frame_path=mask_in_frame_path, n=anim_args.extract_nth_frame, overwrite=anim_args.overwrite_extracted_frames, extract_from_frame=anim_args.extract_from_frame, extract_to_frame=anim_args.extract_to_frame)
+    args.use_mask = True
+    #args.overlay_mask = True
+
+    # determine max frames from length of input frames
+    anim_args.max_frames = len([f for f in pathlib.Path(mask_in_frame_path).glob('*.jpg')])
+    #args.use_init = True
+    print(f"Loading {anim_args.max_frames} input frames from {mask_in_frame_path} and saving video frames to {args.outdir}")
+
+    render_animation(args, anim_args, video_args, parseq_args, loop_args, controlnet_args, root)
+
+def get_parsed_value(value, frame_idx, max_f):
+    pattern = r'`.*?`'
+    regex = re.compile(pattern)
+    parsed_value = value
+    for match in regex.finditer(parsed_value):
+        matched_string = match.group(0)
+        parsed_string = matched_string.replace('t', f'{frame_idx}').replace("max_f", f"{max_f}").replace('`', '')
+        value = numexpr.evaluate(parsed_string)
+        parsed_value = parsed_value.replace(matched_string, str(value))
+    return parsed_value
+
+def render_interpolation(args, anim_args, video_args, parseq_args, loop_args, controlnet_args, root):
+
+    # use parseq if manifest is provided
+    parseq_adapter = ParseqAdapter(parseq_args, anim_args, video_args, controlnet_args, loop_args)
+
+    # expand key frame strings to values
+    keys = DeformAnimKeys(anim_args) if not parseq_adapter.use_parseq else parseq_adapter.anim_keys
+
+    # create output folder for the batch
+    os.makedirs(args.outdir, exist_ok=True)
+    print(f"Saving interpolation animation frames to {args.outdir}")
+
+    # save settings.txt file for the current run
+    save_settings_from_animation_run(args, anim_args, parseq_args, loop_args, controlnet_args, video_args, root)
+
+    # Compute interpolated prompts
+    if parseq_adapter.manages_prompts():
+        print("Parseq prompts are assumed to already be interpolated - not doing any additional prompt interpolation")
+        prompt_series = keys.prompts
+    else:
+        print("Generating interpolated prompts for all frames")
+        prompt_series = interpolate_prompts(root.animation_prompts, anim_args.max_frames)
+
+    state.job_count = anim_args.max_frames
+    frame_idx = 0
+    last_preview_frame = 0
+    # INTERPOLATION MODE
+    while frame_idx < anim_args.max_frames:
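+        # Interpolation mode renders every frame independently from the interpolated prompt/schedule series; no motion warping or previous-frame feedback is applied.
+        # Prompts may contain backtick-wrapped numexpr expressions that get_parsed_value() evaluates with t = current frame and max_f = max frames,
+        # e.g. at frame 20 the prompt "a forest, zoom `t/10`" becomes "a forest, zoom 2.0".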
# print data to cli + prompt_to_print = get_parsed_value(prompt_series[frame_idx].strip(), frame_idx, anim_args.max_frames) + + if prompt_to_print.endswith("--neg"): + prompt_to_print = prompt_to_print[:-5] + print(f"\033[36mInterpolation frame: \033[0m{frame_idx}/{anim_args.max_frames} ") + print(f"\033[32mSeed: \033[0m{args.seed}") + print(f"\033[35mPrompt: \033[0m{prompt_to_print}") + + state.job = f"frame {frame_idx + 1}/{anim_args.max_frames}" + state.job_no = frame_idx + 1 + + if state.interrupted: + break + if state.skipped: + print("\n** PAUSED **") + state.skipped = False + while not state.skipped: + time.sleep(0.1) + print("** RESUMING **") + + # grab inputs for current frame generation + args.prompt = prompt_to_print + args.scale = keys.cfg_scale_schedule_series[frame_idx] + args.pix2pix_img_cfg_scale = keys.pix2pix_img_cfg_scale_series[frame_idx] + + scheduled_sampler_name = keys.sampler_schedule_series[frame_idx].casefold() if anim_args.enable_sampler_scheduling and keys.sampler_schedule_series[frame_idx] is not None else None + args.steps = int(keys.steps_schedule_series[frame_idx]) if anim_args.enable_steps_scheduling and keys.steps_schedule_series[frame_idx] is not None else args.steps + scheduled_clipskip = int(keys.clipskip_schedule_series[frame_idx]) if anim_args.enable_clipskip_scheduling and keys.clipskip_schedule_series[frame_idx] is not None else None + args.checkpoint = keys.checkpoint_schedule_series[frame_idx] if anim_args.enable_checkpoint_scheduling else None + if anim_args.enable_subseed_scheduling: + root.subseed = int(keys.subseed_schedule_series[frame_idx]) + root.subseed_strength = keys.subseed_strength_schedule_series[frame_idx] + else: + root.subseed, root.subseed_strength = keys.subseed_schedule_series[frame_idx], keys.subseed_strength_schedule_series[frame_idx] + if parseq_adapter.manages_seed(): + anim_args.enable_subseed_scheduling = True + root.subseed, root.subseed_strength = int(keys.subseed_schedule_series[frame_idx]), keys.subseed_strength_schedule_series[frame_idx] + args.seed = int(keys.seed_schedule_series[frame_idx]) if (args.seed_behavior == 'schedule' or parseq_adapter.manages_seed()) else args.seed + opts.data["CLIP_stop_at_last_layers"] = scheduled_clipskip if scheduled_clipskip is not None else opts.data["CLIP_stop_at_last_layers"] + + image = generate(args, keys, anim_args, loop_args, controlnet_args, root, parseq_adapter, frame_idx, sampler_name=scheduled_sampler_name) + filename = f"{root.timestring}_{frame_idx:09}.png" + + save_image(image, 'PIL', filename, args, video_args, root) + + state.current_image = image + + if args.seed_behavior != 'schedule': + args.seed = next_seed(args, root) + + last_preview_frame = render_preview(args, anim_args, video_args, root, frame_idx, last_preview_frame) + + frame_idx += 1 + + diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/resume.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/resume.py new file mode 100644 index 0000000000000000000000000000000000000000..c3f429a8680c636bb11a8b288a5b7b1572a85b3a --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/resume.py @@ -0,0 +1,72 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +# Contact the authors: https://deforum.github.io/ + +import os +import cv2 +from modules.shared import opts + +# Resume requires at least two actual frames in order to work +# 'Actual' frames are defined as frames that go through generation +# - Can't resume from a single frame. +# - If you have a cadence of 10, you need at least 10 frames in order to resume. +# - Resume grabs the last actual frame and the 2nd to last actual frame +# in order to work with cadence properly and feed it the prev_img/next_img + +def get_resume_vars(folder, timestring, cadence): + DEBUG_MODE = opts.data.get("deforum_debug_mode_enabled", False) + # count previous frames + frame_count = 0 + for item in os.listdir(folder): + # don't count txt files or mp4 files + if ".txt" in item or ".mp4" in item: + pass + else: + filename = item.split("_") + # other image file types may be supported in the future, + # so we just count files containing timestring + # that don't contain the depth keyword (depth maps are saved in same folder) + if timestring in filename and "depth" not in filename: + frame_count += 1 + # add this to debugging var + if DEBUG_MODE: + print(f"\033[36mResuming:\033[0m File: {filename}") + + print(f"\033[36mResuming:\033[0m Current frame count: {frame_count}") + + # get last frame from frame count corrected for any trailing cadence frames + last_frame = frame_count - (frame_count % cadence) + + # calculate previous actual frame + prev_frame = last_frame - cadence + + # calculate next actual frame + next_frame = last_frame - 1 + + # get prev_img/next_img from prev/next frame index (files start at 0, so subtract 1 for index var) + path = os.path.join(folder, f"{timestring}_{prev_frame:09}.png") + prev_img = cv2.imread(path) + path = os.path.join(folder, f"{timestring}_{next_frame:09}.png") + next_img = cv2.imread(path) + + # report resume last/next in console + print(f"\033[36mResuming:\033[0m Last frame: {prev_frame} - Next frame: {next_frame} ") + + # returns: + # last frame count, accounting for cadence + # next frame count, accounting for cadence + # prev frame's image cv2 BGR + # next frame's image cv2 BGR + return prev_frame, next_frame, prev_img, next_img diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/rich.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/rich.py new file mode 100644 index 0000000000000000000000000000000000000000..9db7d7af1efb908552c515740a0656998f7041b4 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/rich.py @@ -0,0 +1,18 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
+ +# Contact the authors: https://deforum.github.io/ + +from rich.console import Console +console = Console() \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/run_deforum.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/run_deforum.py new file mode 100644 index 0000000000000000000000000000000000000000..86c70b82728dd4f2532716b65f7a85248e00fd1e --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/run_deforum.py @@ -0,0 +1,229 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +# Contact the authors: https://deforum.github.io/ + +import os +import shutil +import traceback +import gc +import torch +import modules.shared as shared +from modules.processing import Processed, StableDiffusionProcessingImg2Img +from .args import get_component_names, process_args +from .deforum_tqdm import DeforumTQDM +from .save_images import dump_frames_cache, reset_frames_cache +from .frame_interpolation import process_video_interpolation +from .general_utils import get_deforum_version +from .upscaling import make_upscale_v2 +from .video_audio_utilities import ffmpeg_stitch_video, make_gifski_gif, handle_imgs_deletion, handle_input_frames_deletion, handle_cn_frames_deletion, get_ffmpeg_params, get_ffmpeg_paths +from pathlib import Path +from .settings import save_settings_from_animation_run +from .deforum_controlnet import num_of_models + +from deforum_api import JobStatusTracker +from deforum_api_models import DeforumJobPhase + + +# this global param will contain the latest generated video HTML-data-URL info (for preview inside the UI when needed) +last_vid_data = None + +def run_deforum(*args): + print("started run_deforum") + + f_location, f_crf, f_preset = get_ffmpeg_params() # get params for ffmpeg exec + component_names = get_component_names() + args_dict = {component_names[i]: args[i+2] for i in range(0, len(component_names))} + p = StableDiffusionProcessingImg2Img( + sd_model=shared.sd_model, + outpath_samples = shared.opts.outdir_samples or shared.opts.outdir_img2img_samples + ) # we'll set up the rest later + + times_to_run = 1 + # find how many times in total we need to run according to file count uploaded to Batch Mode upload box + if args_dict['custom_settings_file'] is not None and len(args_dict['custom_settings_file']) > 1: + times_to_run = len(args_dict['custom_settings_file']) + + print(f"times_to_run: {times_to_run}") + for i in range(times_to_run): # run for as many times as we need + job_id = f"{args[0]}-{i}" + JobStatusTracker().update_phase(job_id, DeforumJobPhase.PREPARING) + print(f"\033[4;33mDeforum extension for auto1111 webui\033[0m") + print(f"Git commit: {get_deforum_version()}") + print(f"Starting job {job_id}...") + args_dict['self'] = None + args_dict['p'] = p + try: + args_loaded_ok, root, args, anim_args, video_args, parseq_args, loop_args, controlnet_args = process_args(args_dict, i) + except Exception as e: + 
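+            # argument parsing failed (most commonly malformed prompt JSON): mark the job as terminally failed and surface the error to the user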
JobStatusTracker().fail_job(job_id, error_type="TERMINAL", message="Invalid arguments.") + print("\n*START OF TRACEBACK*") + traceback.print_exc() + print("*END OF TRACEBACK*\nUser friendly error message:") + print(f"Error: {e}. Please, check your prompts with a JSON validator.") + return None, None, None, f"Error: '{e}'. Please, check your prompts with a JSON validator. Full error message is in your terminal/ cli." + if args_loaded_ok is False: + if times_to_run > 1: + print(f"\033[31mWARNING:\033[0m skipped running from the following setting file, as it contains an invalid JSON: {os.path.basename(args_dict['custom_settings_file'][i].name)}") + continue + else: + JobStatusTracker().fail_job(job_id, error_type="TERMINAL", message="Invalid settings file.") + print(f"\033[31mERROR!\033[0m Couldn't load data from '{os.path.basename(args_dict['custom_settings_file'][i].name)}'. Make sure it's a valid JSON using a JSON validator") + return None, None, None, f"Couldn't load data from '{os.path.basename(args_dict['custom_settings_file'][i].name)}'. Make sure it's a valid JSON using a JSON validator" + + root.initial_clipskip = shared.opts.data.get("CLIP_stop_at_last_layers", 1) + root.initial_img2img_fix_steps = shared.opts.data.get("img2img_fix_steps", False) + root.initial_noise_multiplier = shared.opts.data.get("initial_noise_multiplier", 1.0) + root.initial_ddim_eta = shared.opts.data.get("eta_ddim", 0.0) + root.initial_ancestral_eta = shared.opts.data.get("eta_ancestral", 1.0) + root.job_id = job_id + + # clean up unused memory + reset_frames_cache(root) + gc.collect() + torch.cuda.empty_cache() + + # Import them *here* or we add 3 seconds to initial webui launch-time. user doesn't feel it when we import inside the func: + from .render import render_animation + from .render_modes import render_input_video, render_animation_with_video_mask, render_interpolation + + tqdm_backup = shared.total_tqdm + shared.total_tqdm = DeforumTQDM(args, anim_args, parseq_args, video_args) + try: # dispatch to appropriate renderer + JobStatusTracker().update_phase(job_id, DeforumJobPhase.GENERATING) + JobStatusTracker().update_output_info(job_id, outdir=args.outdir, timestring=root.timestring) + if anim_args.animation_mode == '2D' or anim_args.animation_mode == '3D': + if anim_args.use_mask_video: + render_animation_with_video_mask(args, anim_args, video_args, parseq_args, loop_args, controlnet_args, root) # allow mask video without an input video + else: + render_animation(args, anim_args, video_args, parseq_args, loop_args, controlnet_args, root) + elif anim_args.animation_mode == 'Video Input': + render_input_video(args, anim_args, video_args, parseq_args, loop_args, controlnet_args, root)#TODO: prettify code + elif anim_args.animation_mode == 'Interpolation': + render_interpolation(args, anim_args, video_args, parseq_args, loop_args, controlnet_args, root) + else: + print('Other modes are not available yet!') + except Exception as e: + JobStatusTracker().fail_job(job_id, error_type="RETRYABLE", message="Generation error.") + print("\n*START OF TRACEBACK*") + traceback.print_exc() + print("*END OF TRACEBACK*\n") + print("User friendly error message:") + print(f"Error: {e}. Please, check your schedules/ init values.") + return None, None, None, f"Error: '{e}'. Before reporting, please check your schedules/ init values. Full error message is in your terminal/ cli." + finally: + shared.total_tqdm = tqdm_backup + # reset shared.opts.data vals to what they were before we started the animation. 
Else they will stick to the last value - it actually updates webui settings (config.json) + shared.opts.data["CLIP_stop_at_last_layers"] = root.initial_clipskip + shared.opts.data["img2img_fix_steps"] = root.initial_img2img_fix_steps + shared.opts.data["initial_noise_multiplier"] = root.initial_noise_multiplier + shared.opts.data["eta_ddim"] = root.initial_ddim_eta + shared.opts.data["eta_ancestral"] = root.initial_ancestral_eta + + JobStatusTracker().update_phase(job_id, DeforumJobPhase.POST_PROCESSING) + + if video_args.store_frames_in_ram: + dump_frames_cache(root) + + from base64 import b64encode + + # Delete folder with duplicated imgs from OS temp folder + shutil.rmtree(root.tmp_deforum_run_duplicated_folder, ignore_errors=True) + + # Decide whether we need to try and frame interpolate later + need_to_frame_interpolate = False + if video_args.frame_interpolation_engine != "None" and not video_args.skip_video_creation and not video_args.store_frames_in_ram: + need_to_frame_interpolate = True + + if video_args.skip_video_creation: + print("\nSkipping video creation, uncheck 'Skip video creation' in 'Output' tab if you want to get a video too :)") + else: + # Stitch video using ffmpeg! + try: + f_location, f_crf, f_preset = get_ffmpeg_params() # get params for ffmpeg exec + image_path, mp4_path, real_audio_track, srt_path = get_ffmpeg_paths(args.outdir, root.timestring, anim_args, video_args) + ffmpeg_stitch_video(ffmpeg_location=f_location, fps=video_args.fps, outmp4_path=mp4_path, stitch_from_frame=0, stitch_to_frame=anim_args.max_frames, imgs_path=image_path, add_soundtrack=video_args.add_soundtrack, audio_path=real_audio_track, crf=f_crf, preset=f_preset, srt_path=srt_path) + mp4 = open(mp4_path, 'rb').read() + data_url = f"data:video/mp4;base64, {b64encode(mp4).decode()}" + global last_vid_data + last_vid_data = f'
Deforum extension for auto1111 — version 2.4b
' + except Exception as e: + if need_to_frame_interpolate: + print(f"FFMPEG DID NOT STITCH ANY VIDEO. However, you requested to frame interpolate - so we will continue to frame interpolation, but you'll be left only with the interpolated frames and not a video, since ffmpeg couldn't run. Original ffmpeg error: {e}") + else: + print(f"** FFMPEG DID NOT STITCH ANY VIDEO ** Error: {e}") + pass + + if video_args.make_gif and not video_args.skip_video_creation and not video_args.store_frames_in_ram: + make_gifski_gif(imgs_raw_path = args.outdir, imgs_batch_id = root.timestring, fps = video_args.fps, models_folder = root.models_path, current_user_os = root.current_user_os) + + # Upscale video once generation is done: + if video_args.r_upscale_video and not video_args.skip_video_creation and not video_args.store_frames_in_ram: + # out mp4 path is defined in make_upscale func + make_upscale_v2(upscale_factor = video_args.r_upscale_factor, upscale_model = video_args.r_upscale_model, keep_imgs = video_args.r_upscale_keep_imgs, imgs_raw_path = args.outdir, imgs_batch_id = root.timestring, fps = video_args.fps, deforum_models_path = root.models_path, current_user_os = root.current_user_os, ffmpeg_location=f_location, stitch_from_frame=0, stitch_to_frame=anim_args.max_frames, ffmpeg_crf=f_crf, ffmpeg_preset=f_preset, add_soundtrack = video_args.add_soundtrack ,audio_path=real_audio_track, srt_path=srt_path) + + # FRAME INTERPOLATION TIME + if need_to_frame_interpolate: + print(f"Got a request to *frame interpolate* using {video_args.frame_interpolation_engine}") + path_to_interpolate = args.outdir + + upscaled_folder_path = os.path.join(args.outdir, f"{root.timestring}_upscaled") + use_upscaled_images = video_args.frame_interpolation_use_upscaled and os.path.exists(upscaled_folder_path) and len(os.listdir(upscaled_folder_path)) > 1 + if use_upscaled_images: + print(f"Using upscaled images for frame interpolation.") + path_to_interpolate = upscaled_folder_path + + ouput_vid_path = process_video_interpolation(frame_interpolation_engine=video_args.frame_interpolation_engine, frame_interpolation_x_amount=video_args.frame_interpolation_x_amount,frame_interpolation_slow_mo_enabled=video_args.frame_interpolation_slow_mo_enabled, frame_interpolation_slow_mo_amount=video_args.frame_interpolation_slow_mo_amount, orig_vid_fps=video_args.fps, deforum_models_path=root.models_path, real_audio_track=real_audio_track, raw_output_imgs_path=path_to_interpolate, img_batch_id=root.timestring, ffmpeg_location=f_location, ffmpeg_crf=f_crf, ffmpeg_preset=f_preset, keep_interp_imgs=video_args.frame_interpolation_keep_imgs, orig_vid_name=None, resolution=None, srt_path=srt_path) + + # If the interpolated video was stitched from the upscaled frames, the video needs to be moved + # out of the upscale directory. 
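+            # the moved file keeps its original stem with an '_upscaled' suffix and lands directly in args.outdir next to the other outputs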
+ if use_upscaled_images and ouput_vid_path and os.path.exists(ouput_vid_path): + ouput_vid_path_final = os.path.join(args.outdir, Path(ouput_vid_path).stem + "_upscaled.mp4") + print(f"Moving upscaled, interpolated vid from {ouput_vid_path} to {ouput_vid_path_final}") + shutil.move(ouput_vid_path, ouput_vid_path_final) + + if video_args.delete_imgs and not video_args.skip_video_creation: + handle_imgs_deletion(vid_path=mp4_path, imgs_folder_path=args.outdir, batch_id=root.timestring) + + if video_args.delete_input_frames: + # Check if the path exists + if os.path.exists(os.path.join(args.outdir, 'inputframes')): + print(f"Deleting inputframes") + handle_input_frames_deletion(imgs_folder_path=os.path.join(args.outdir, 'inputframes')) + # Now do CN input frame deletion + cn_inputframes_list = [os.path.join(args.outdir, f'controlnet_{i}_inputframes') for i in range(1, num_of_models + 1)] + handle_cn_frames_deletion(cn_inputframes_list) + + root.initial_info = (root.initial_info or " ") + f"\n The animation is stored in {args.outdir}" + reset_frames_cache(root) # cleanup the RAM in any case + processed = Processed(p, [root.first_frame], 0, root.initial_info) + + shared.total_tqdm.clear() + + generation_info_js = processed.js() + + if shared.opts.data.get("deforum_enable_persistent_settings", False): + persistent_sett_path = shared.opts.data.get("deforum_persistent_settings_path") + save_settings_from_animation_run(args, anim_args, parseq_args, loop_args, controlnet_args, video_args, root, persistent_sett_path) + + # Close the pipeline, not to interfere with ControlNet + try: + p.close() + except Exception as e: + ... + + if (not shared.state.interrupted): + JobStatusTracker().complete_job(root.job_id) + + return processed.images, root.timestring, generation_info_js, processed.info diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/save_images.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/save_images.py new file mode 100644 index 0000000000000000000000000000000000000000..0e949c2d9291d0dfaa58646d9b36df42e9184a0d --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/save_images.py @@ -0,0 +1,45 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
+ +# Contact the authors: https://deforum.github.io/ + +import os +import cv2 +import gc +import time + +def get_output_folder(output_path, batch_folder): + out_path = os.path.join(output_path,time.strftime('%Y-%m')) + if batch_folder != "": + out_path = os.path.join(out_path, batch_folder) + os.makedirs(out_path, exist_ok=True) + return out_path + +def save_image(image, image_type, filename, args, video_args, root): + if video_args.store_frames_in_ram: + root.frames_cache.append({'path':os.path.join(args.outdir, filename), 'image':image, 'image_type':image_type}) + else: + image.save(os.path.join(args.outdir, filename)) + +def reset_frames_cache(root): + root.frames_cache = [] + gc.collect() + +def dump_frames_cache(root): + for image_cache in root.frames_cache: + if image_cache['image_type'] == 'cv2': + cv2.imwrite(image_cache['path'], image_cache['image']) + elif image_cache['image_type'] == 'PIL': + image_cache['image'].save(image_cache['path']) + # do not reset the cache since we're going to add frame erasing later function #TODO diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/seed.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/seed.py new file mode 100644 index 0000000000000000000000000000000000000000..f631b4e2709cbe992795a0e272adfa6f23e0de0d --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/seed.py @@ -0,0 +1,33 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +# Contact the authors: https://deforum.github.io/ + +import random + +def next_seed(args, root): + if args.seed_behavior == 'iter': + args.seed += 1 if root.seed_internal % args.seed_iter_N == 0 else 0 + root.seed_internal += 1 + elif args.seed_behavior == 'ladder': + args.seed += 2 if root.seed_internal == 0 else -1 + root.seed_internal = 1 if root.seed_internal == 0 else 0 + elif args.seed_behavior == 'alternate': + args.seed += 1 if root.seed_internal == 0 else -1 + root.seed_internal = 1 if root.seed_internal == 0 else 0 + elif args.seed_behavior == 'fixed': + pass # always keep seed the same + else: + args.seed = random.randint(0, 2**32 - 1) + return args.seed diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/settings.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/settings.py new file mode 100644 index 0000000000000000000000000000000000000000..bd1132d9b63f35a6861ca5250c55246e84a3c727 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/settings.py @@ -0,0 +1,181 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +# Contact the authors: https://deforum.github.io/ + +import os +import json +import modules.shared as sh +from .args import DeforumArgs, DeforumAnimArgs, DeforumOutputArgs, ParseqArgs, LoopArgs, get_settings_component_names, pack_args +from .deforum_controlnet import controlnet_component_names +from .defaults import mask_fill_choices +from .deprecation_utils import handle_deprecated_settings +from .general_utils import get_deforum_version, clean_gradio_path_strings + +def get_keys_to_exclude(): + return ["init_sample", "perlin_w", "perlin_h", "image_path", "outdir", "init_image_box"] + # perlin params are used just not shown in ui for now, so not to be deleted + # image_path and outdir are in use, not to be deleted + # init_image_box is PIL object not string, so ignore. + +def load_args(args_dict_main, args, anim_args, parseq_args, loop_args, controlnet_args, video_args, custom_settings_file, root, run_id): + custom_settings_file = custom_settings_file[run_id] + print(f"reading custom settings from {custom_settings_file.name}") + if not os.path.isfile(custom_settings_file.name): + print('Custom settings file does not exist. Using in-notebook settings.') + return + with open(custom_settings_file.name, "r") as f: + try: + jdata = json.loads(f.read()) + except: + return False + handle_deprecated_settings(jdata) + root.animation_prompts = jdata.get("prompts", root.animation_prompts) + if "animation_prompts_positive" in jdata: + args_dict_main['animation_prompts_positive'] = jdata["animation_prompts_positive"] + if "animation_prompts_negative" in jdata: + args_dict_main['animation_prompts_negative'] = jdata["animation_prompts_negative"] + keys_to_exclude = get_keys_to_exclude() + for args_namespace in [args, anim_args, parseq_args, loop_args, controlnet_args, video_args]: + for k, v in vars(args_namespace).items(): + if k not in keys_to_exclude: + if k in jdata: + setattr(args_namespace, k, jdata[k]) + else: + print(f"Key {k} doesn't exist in the custom settings data! 
Using default value of {v}") + print(args, anim_args, parseq_args, loop_args) + return True + +# save settings function that get calls when run_deforum is being called +def save_settings_from_animation_run(args, anim_args, parseq_args, loop_args, controlnet_args, video_args, root, full_out_file_path = None): + if full_out_file_path: + args.__dict__["seed"] = root.raw_seed + args.__dict__["batch_name"] = root.raw_batch_name + args.__dict__["prompts"] = root.animation_prompts + args.__dict__["positive_prompts"] = args.positive_prompts + args.__dict__["negative_prompts"] = args.negative_prompts + exclude_keys = get_keys_to_exclude() + settings_filename = full_out_file_path if full_out_file_path else os.path.join(args.outdir, f"{root.timestring}_settings.txt") + with open(settings_filename, "w+", encoding="utf-8") as f: + s = {} + for d in (args.__dict__, anim_args.__dict__, parseq_args.__dict__, loop_args.__dict__, controlnet_args.__dict__, video_args.__dict__): + s.update({k: v for k, v in d.items() if k not in exclude_keys}) + s["sd_model_name"] = sh.sd_model.sd_checkpoint_info.name + s["sd_model_hash"] = sh.sd_model.sd_checkpoint_info.hash + s["deforum_git_commit_id"] = get_deforum_version() + json.dump(s, f, ensure_ascii=False, indent=4) + +# In gradio gui settings save/ load funcs: +def save_settings(*args, **kwargs): + settings_path = args[0].strip() + settings_path = clean_gradio_path_strings(settings_path) + settings_path = os.path.realpath(settings_path) + settings_component_names = get_settings_component_names() + data = {settings_component_names[i]: args[i+1] for i in range(0, len(settings_component_names))} + args_dict = pack_args(data, DeforumArgs) + anim_args_dict = pack_args(data, DeforumAnimArgs) + parseq_dict = pack_args(data, ParseqArgs) + args_dict["prompts"] = json.loads(data['animation_prompts']) + args_dict["animation_prompts_positive"] = data['animation_prompts_positive'] + args_dict["animation_prompts_negative"] = data['animation_prompts_negative'] + loop_dict = pack_args(data, LoopArgs) + controlnet_dict = pack_args(data, controlnet_component_names) + video_args_dict = pack_args(data, DeforumOutputArgs) + combined = {**args_dict, **anim_args_dict, **parseq_dict, **loop_dict, **controlnet_dict, **video_args_dict} + exclude_keys = get_keys_to_exclude() + filtered_combined = {k: v for k, v in combined.items() if k not in exclude_keys} + filtered_combined["sd_model_name"] = sh.sd_model.sd_checkpoint_info.name + filtered_combined["sd_model_hash"] = sh.sd_model.sd_checkpoint_info.hash + filtered_combined["deforum_git_commit_id"] = get_deforum_version() + print(f"saving custom settings to {settings_path}") + with open(settings_path, "w", encoding='utf-8') as f: + f.write(json.dumps(filtered_combined, ensure_ascii=False, indent=4)) + + return [""] + +def load_all_settings(*args, ui_launch=False, **kwargs): + import gradio as gr + settings_path = args[0].strip() + settings_path = clean_gradio_path_strings(settings_path) + settings_path = os.path.realpath(settings_path) + settings_component_names = get_settings_component_names() + data = {settings_component_names[i]: args[i+1] for i in range(len(settings_component_names))} + print(f"reading custom settings from {settings_path}") + + if not os.path.isfile(settings_path): + print('The custom settings file does not exist. 
The values will be unchanged.') + if ui_launch: + return ({key: gr.update(value=value) for key, value in data.items()},) + else: + return list(data.values()) + [""] + + with open(settings_path, "r", encoding='utf-8') as f: + jdata = json.load(f) + handle_deprecated_settings(jdata) + if 'animation_prompts' in jdata: + jdata['prompts'] = jdata['animation_prompts'] + + result = {} + for key, default_val in data.items(): + val = jdata.get(key, default_val) + if key == 'sampler' and isinstance(val, int): + from modules.sd_samplers import samplers_for_img2img + val = samplers_for_img2img[val].name + elif key == 'fill' and isinstance(val, int): + val = mask_fill_choices[val] + elif key in {'reroll_blank_frames', 'noise_type'} and key not in jdata: + default_key_val = (DeforumArgs if key != 'noise_type' else DeforumAnimArgs)[key] + print(f"{key} not found in load file, using default value: {default_key_val}") + val = default_key_val + elif key in {'animation_prompts_positive', 'animation_prompts_negative'}: + val = jdata.get(key, default_val) + elif key == 'animation_prompts': + val = json.dumps(jdata['prompts'], ensure_ascii=False, indent=4) + + result[key] = val + + if ui_launch: + return ({key: gr.update(value=value) for key, value in result.items()},) + else: + return list(result.values()) + [""] + + +def load_video_settings(*args, **kwargs): + video_settings_path = args[0].strip() + vid_args_names = list(DeforumOutputArgs().keys()) + data = {vid_args_names[i]: args[i+1] for i in range(0, len(vid_args_names))} + print(f"reading custom video settings from {video_settings_path}") + jdata = {} + if not os.path.isfile(video_settings_path): + print('The custom video settings file does not exist. The values will be unchanged.') + return [data[name] for name in vid_args_names] + [""] + else: + with open(video_settings_path, "r") as f: + jdata = json.loads(f.read()) + handle_deprecated_settings(jdata) + ret = [] + + for key in data: + if key == 'add_soundtrack': + add_soundtrack_val = jdata[key] + if type(add_soundtrack_val) == bool: + ret.append('File' if add_soundtrack_val else 'None') + else: + ret.append(add_soundtrack_val) + elif key in jdata: + ret.append(jdata[key]) + else: + ret.append(data[key]) + + return ret \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/adabins/__init__.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/adabins/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8b2a0eea190658f294d0a49363ea28543087bdf6 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/adabins/__init__.py @@ -0,0 +1 @@ +from .unet_adaptive_bins import UnetAdaptiveBins diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/adabins/layers.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/adabins/layers.py new file mode 100644 index 0000000000000000000000000000000000000000..499cd8cc1ec5973da5718d184d36b187869f9c28 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/adabins/layers.py @@ -0,0 +1,36 @@ +import torch +import torch.nn as nn + + +class PatchTransformerEncoder(nn.Module): + def __init__(self, in_channels, patch_size=10, embedding_dim=128, num_heads=4): + super(PatchTransformerEncoder, self).__init__() + encoder_layers = nn.TransformerEncoderLayer(embedding_dim, num_heads, dim_feedforward=1024) + self.transformer_encoder = nn.TransformerEncoder(encoder_layers, num_layers=4) # takes shape S,N,E 
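+        # a strided PxP convolution cuts the feature map into non-overlapping patches and projects each one to an embedding_dim-sized token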
+ + self.embedding_convPxP = nn.Conv2d(in_channels, embedding_dim, + kernel_size=patch_size, stride=patch_size, padding=0) + + self.positional_encodings = nn.Parameter(torch.rand(500, embedding_dim), requires_grad=True) + + def forward(self, x): + embeddings = self.embedding_convPxP(x).flatten(2) # .shape = n,c,s = n, embedding_dim, s + # embeddings = nn.functional.pad(embeddings, (1,0)) # extra special token at start ? + embeddings = embeddings + self.positional_encodings[:embeddings.shape[2], :].T.unsqueeze(0) + + # change to S,N,E format required by transformer + embeddings = embeddings.permute(2, 0, 1) + x = self.transformer_encoder(embeddings) # .shape = S, N, E + return x + + +class PixelWiseDotProduct(nn.Module): + def __init__(self): + super(PixelWiseDotProduct, self).__init__() + + def forward(self, x, K): + n, c, h, w = x.size() + _, cout, ck = K.size() + assert c == ck, "Number of channels in x and Embedding dimension (at dim 2) of K matrix must match" + y = torch.matmul(x.view(n, c, h * w).permute(0, 2, 1), K.permute(0, 2, 1)) # .shape = n, hw, cout + return y.permute(0, 2, 1).view(n, cout, h, w) diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/adabins/miniViT.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/adabins/miniViT.py new file mode 100644 index 0000000000000000000000000000000000000000..8a619734aaa82e73fbe37800a6a1dd12e83020a2 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/adabins/miniViT.py @@ -0,0 +1,45 @@ +import torch +import torch.nn as nn + +from .layers import PatchTransformerEncoder, PixelWiseDotProduct + + +class mViT(nn.Module): + def __init__(self, in_channels, n_query_channels=128, patch_size=16, dim_out=256, + embedding_dim=128, num_heads=4, norm='linear'): + super(mViT, self).__init__() + self.norm = norm + self.n_query_channels = n_query_channels + self.patch_transformer = PatchTransformerEncoder(in_channels, patch_size, embedding_dim, num_heads) + self.dot_product_layer = PixelWiseDotProduct() + + self.conv3x3 = nn.Conv2d(in_channels, embedding_dim, kernel_size=3, stride=1, padding=1) + self.regressor = nn.Sequential(nn.Linear(embedding_dim, 256), + nn.LeakyReLU(), + nn.Linear(256, 256), + nn.LeakyReLU(), + nn.Linear(256, dim_out)) + + def forward(self, x): + # n, c, h, w = x.size() + tgt = self.patch_transformer(x.clone()) # .shape = S, N, E + + x = self.conv3x3(x) + + regression_head, queries = tgt[0, ...], tgt[1:self.n_query_channels + 1, ...] 
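+        # token 0 of the transformer output regresses the adaptive bin widths; the next n_query_channels tokens serve as queries for the pixel-wise dot product below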
+ + # Change from S, N, E to N, S, E + queries = queries.permute(1, 0, 2) + range_attention_maps = self.dot_product_layer(x, queries) # .shape = n, n_query_channels, h, w + + y = self.regressor(regression_head) # .shape = N, dim_out + if self.norm == 'linear': + y = torch.relu(y) + eps = 0.1 + y = y + eps + elif self.norm == 'softmax': + return torch.softmax(y, dim=1), range_attention_maps + else: + y = torch.sigmoid(y) + y = y / y.sum(dim=1, keepdim=True) + return y, range_attention_maps diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/adabins/unet_adaptive_bins.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/adabins/unet_adaptive_bins.py new file mode 100644 index 0000000000000000000000000000000000000000..dc42039e89fee4f6185a973adbc3b0788f8ff0cb --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/adabins/unet_adaptive_bins.py @@ -0,0 +1,158 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +import os +from pathlib import Path +from .miniViT import mViT +from modules.shared import opts + +class UpSampleBN(nn.Module): + def __init__(self, skip_input, output_features): + super(UpSampleBN, self).__init__() + + self._net = nn.Sequential(nn.Conv2d(skip_input, output_features, kernel_size=3, stride=1, padding=1), + nn.BatchNorm2d(output_features), + nn.LeakyReLU(), + nn.Conv2d(output_features, output_features, kernel_size=3, stride=1, padding=1), + nn.BatchNorm2d(output_features), + nn.LeakyReLU()) + + def forward(self, x, concat_with): + up_x = F.interpolate(x, size=[concat_with.size(2), concat_with.size(3)], mode='bilinear', align_corners=True) + f = torch.cat([up_x, concat_with], dim=1) + return self._net(f) + + +class DecoderBN(nn.Module): + def __init__(self, num_features=2048, num_classes=1, bottleneck_features=2048): + super(DecoderBN, self).__init__() + features = int(num_features) + + self.conv2 = nn.Conv2d(bottleneck_features, features, kernel_size=1, stride=1, padding=1) + + self.up1 = UpSampleBN(skip_input=features // 1 + 112 + 64, output_features=features // 2) + self.up2 = UpSampleBN(skip_input=features // 2 + 40 + 24, output_features=features // 4) + self.up3 = UpSampleBN(skip_input=features // 4 + 24 + 16, output_features=features // 8) + self.up4 = UpSampleBN(skip_input=features // 8 + 16 + 8, output_features=features // 16) + + # self.up5 = UpSample(skip_input=features // 16 + 3, output_features=features//16) + self.conv3 = nn.Conv2d(features // 16, num_classes, kernel_size=3, stride=1, padding=1) + # self.act_out = nn.Softmax(dim=1) if output_activation == 'softmax' else nn.Identity() + + def forward(self, features): + x_block0, x_block1, x_block2, x_block3, x_block4 = features[4], features[5], features[6], features[8], features[ + 11] + + x_d0 = self.conv2(x_block4) + + x_d1 = self.up1(x_d0, x_block3) + x_d2 = self.up2(x_d1, x_block2) + x_d3 = self.up3(x_d2, x_block1) + x_d4 = self.up4(x_d3, x_block0) + # x_d5 = self.up5(x_d4, features[0]) + out = self.conv3(x_d4) + # out = self.act_out(out) + # if with_features: + # return out, features[-1] + # elif with_intermediate: + # return out, [x_block0, x_block1, x_block2, x_block3, x_block4, x_d1, x_d2, x_d3, x_d4] + return out + + +class Encoder(nn.Module): + def __init__(self, backend): + super(Encoder, self).__init__() + self.original_model = backend + + def forward(self, x): + features = [x] + for k, v in self.original_model._modules.items(): + if (k == 'blocks'): + for ki, vi in v._modules.items(): + 
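+                    # run the blocks sequentially, keeping every intermediate activation so the decoder can tap them as skip connections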
features.append(vi(features[-1])) + else: + features.append(v(features[-1])) + return features + + +class UnetAdaptiveBins(nn.Module): + def __init__(self, backend, n_bins=100, min_val=0.1, max_val=10, norm='linear'): + super(UnetAdaptiveBins, self).__init__() + self.num_classes = n_bins + self.min_val = min_val + self.max_val = max_val + self.encoder = Encoder(backend) + self.adaptive_bins_layer = mViT(128, n_query_channels=128, patch_size=16, + dim_out=n_bins, + embedding_dim=128, norm=norm) + + self.decoder = DecoderBN(num_classes=128) + self.conv_out = nn.Sequential(nn.Conv2d(128, n_bins, kernel_size=1, stride=1, padding=0), + nn.Softmax(dim=1)) + + def forward(self, x, **kwargs): + unet_out = self.decoder(self.encoder(x), **kwargs) + bin_widths_normed, range_attention_maps = self.adaptive_bins_layer(unet_out) + out = self.conv_out(range_attention_maps) + + # Post process + # n, c, h, w = out.shape + # hist = torch.sum(out.view(n, c, h * w), dim=2) / (h * w) # not used for training + + bin_widths = (self.max_val - self.min_val) * bin_widths_normed # .shape = N, dim_out + bin_widths = nn.functional.pad(bin_widths, (1, 0), mode='constant', value=self.min_val) + bin_edges = torch.cumsum(bin_widths, dim=1) + + centers = 0.5 * (bin_edges[:, :-1] + bin_edges[:, 1:]) + n, dout = centers.size() + centers = centers.view(n, dout, 1, 1) + + pred = torch.sum(out * centers, dim=1, keepdim=True) + + return bin_edges, pred + + def get_1x_lr_params(self): # lr/10 learning rate + return self.encoder.parameters() + + def get_10x_lr_params(self): # lr learning rate + modules = [self.decoder, self.adaptive_bins_layer, self.conv_out] + for m in modules: + yield from m.parameters() + + @classmethod + def build(cls, n_bins, **kwargs): + DEBUG_MODE = opts.data.get("deforum_debug_mode_enabled", False) + basemodel_name = 'tf_efficientnet_b5_ap' + + print('Loading AdaBins model...') + predicted_torch_model_cache_path = str(Path.home()) + '\\.cache\\torch\\hub\\rwightman_gen-efficientnet-pytorch_master' + predicted_gep_cache_testilfe = Path(predicted_torch_model_cache_path + '\\hubconf.py') + #print(f"predicted_gep_cache_testilfe: {predicted_gep_cache_testilfe}") + # try to fetch the models from cache, and only if it can't be find, download from the internet (to enable offline usage) + if os.path.isfile(predicted_gep_cache_testilfe): + basemodel = torch.hub.load(predicted_torch_model_cache_path, basemodel_name, pretrained=True, source = 'local') + else: + basemodel = torch.hub.load('rwightman/gen-efficientnet-pytorch', basemodel_name, pretrained=True) + if DEBUG_MODE: + print('Done.') + + # Remove last layer + if DEBUG_MODE: + print('Removing last two layers (global_pool & classifier).') + basemodel.global_pool = nn.Identity() + basemodel.classifier = nn.Identity() + + # Building Encoder-Decoder model + if DEBUG_MODE: + print('Building Encoder-Decoder model..', end='') + m = cls(basemodel, n_bins=n_bins, **kwargs) + if DEBUG_MODE: + print('Done.') + return m + + +if __name__ == '__main__': + model = UnetAdaptiveBins.build(100) + x = torch.rand(2, 3, 480, 640) + bins, pred = model(x) + print(bins.shape, pred.shape) diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/LICENSE b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..4a198f59fdfac7d3139e3269bd6bc3f23eee8033 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/LICENSE @@ -0,0 +1,21 @@ +MIT 
License + +This license does not apply to the model weights. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/Quickstart.ipynb b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/Quickstart.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..df223f2a16f4bb61f869983a1e6107469c60c340 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/Quickstart.ipynb @@ -0,0 +1,107 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "import requests\n", + "\n", + "! wget https://owncloud.gwdg.de/index.php/s/ioHbRzFx6th32hn/download -O weights.zip\n", + "! unzip -d weights -j weights.zip\n", + "from models.clipseg import CLIPDensePredT\n", + "from PIL import Image\n", + "from torchvision import transforms\n", + "from matplotlib import pyplot as plt\n", + "\n", + "# load model\n", + "model = CLIPDensePredT(version='ViT-B/16', reduce_dim=64)\n", + "model.eval();\n", + "\n", + "# non-strict, because we only stored decoder weights (not CLIP weights)\n", + "model.load_state_dict(torch.load('weights/rd64-uni.pth', map_location=torch.device('cpu')), strict=False);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Load and normalize `example_image.jpg`. You can also load through an URL." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# load and normalize image\n", + "input_image = Image.open('example_image.jpg')\n", + "\n", + "# or load from URL...\n", + "# image_url = 'https://farm5.staticflickr.com/4141/4856248695_03475782dc_z.jpg'\n", + "# input_image = Image.open(requests.get(image_url, stream=True).raw)\n", + "\n", + "transform = transforms.Compose([\n", + " transforms.ToTensor(),\n", + " transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n", + " transforms.Resize((352, 352)),\n", + "])\n", + "img = transform(input_image).unsqueeze(0)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Predict and visualize (this might take a few seconds if running without GPU support)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "prompts = ['a glass', 'something to fill', 'wood', 'a jar']\n", + "\n", + "# predict\n", + "with torch.no_grad():\n", + " preds = model(img.repeat(4,1,1,1), prompts)[0]\n", + "\n", + "# visualize prediction\n", + "_, ax = plt.subplots(1, 5, figsize=(15, 4))\n", + "[a.axis('off') for a in ax.flatten()]\n", + "ax[0].imshow(input_image)\n", + "[ax[i+1].imshow(torch.sigmoid(preds[i][0])) for i in range(4)];\n", + "[ax[i+1].text(0, -15, prompts[i]) for i in range(4)];" + ] + } + ], + "metadata": { + "interpreter": { + "hash": "800ed241f7db2bd3aa6942aa3be6809cdb30ee6b0a9e773dfecfa9fef1f4c586" + }, + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.10" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/Readme.md b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/Readme.md new file mode 100644 index 0000000000000000000000000000000000000000..b12ef244eeb5021f863072bd1fb127b92a5819c2 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/Readme.md @@ -0,0 +1,84 @@ +# Image Segmentation Using Text and Image Prompts +This repository contains the code used in the paper ["Image Segmentation Using Text and Image Prompts"](https://arxiv.org/abs/2112.10003). + +**The Paper has been accepted to CVPR 2022!** + +drawing + +The systems allows to create segmentation models without training based on: +- An arbitrary text query +- Or an image with a mask highlighting stuff or an object. + +### Quick Start + +In the `Quickstart.ipynb` notebook we provide the code for using a pre-trained CLIPSeg model. If you run the notebook locally, make sure you downloaded the `rd64-uni.pth` weights, either manually or via git lfs extension. +It can also be used interactively using [MyBinder](https://mybinder.org/v2/gh/timojl/clipseg/HEAD?labpath=Quickstart.ipynb) +(please note that the VM does not use a GPU, thus inference takes a few seconds). + + +### Dependencies +This code base depends on pytorch, torchvision and clip (`pip install git+https://github.com/openai/CLIP.git`). +Additional dependencies are hidden for double blind review. 
+ + +### Datasets + +* `PhraseCut` and `PhraseCutPlus`: Referring expression dataset +* `PFEPascalWrapper`: Wrapper class for PFENet's Pascal-5i implementation +* `PascalZeroShot`: Wrapper class for PascalZeroShot +* `COCOWrapper`: Wrapper class for COCO. + +### Models + +* `CLIPDensePredT`: CLIPSeg model with transformer-based decoder. +* `ViTDensePredT`: CLIPSeg model with transformer-based decoder. + +### Third Party Dependencies +For some of the datasets third party dependencies are required. Run the following commands in the `third_party` folder. +```bash +git clone https://github.com/cvlab-yonsei/JoEm +git clone https://github.com/Jia-Research-Lab/PFENet.git +git clone https://github.com/ChenyunWu/PhraseCutDataset.git +git clone https://github.com/juhongm999/hsnet.git +``` + +### Weights + +The MIT license does not apply to these weights. + +We provide two model weights, for D=64 (4.1MB) and D=16 (1.1MB). +``` +wget https://owncloud.gwdg.de/index.php/s/ioHbRzFx6th32hn/download -O weights.zip +unzip -d weights -j weights.zip +``` + + +### Training and Evaluation + +To train use the `training.py` script with experiment file and experiment id parameters. E.g. `python training.py phrasecut.yaml 0` will train the first phrasecut experiment which is defined by the `configuration` and first `individual_configurations` parameters. Model weights will be written in `logs/`. + +For evaluation use `score.py`. E.g. `python score.py phrasecut.yaml 0 0` will train the first phrasecut experiment of `test_configuration` and the first configuration in `individual_configurations`. + + +### Usage of PFENet Wrappers + +In order to use the dataset and model wrappers for PFENet, the PFENet repository needs to be cloned to the root folder. +`git clone https://github.com/Jia-Research-Lab/PFENet.git ` + + +### License + +The source code files in this repository (excluding model weights) are released under MIT license. 
+ +### Citation +``` +@InProceedings{lueddecke22_cvpr, + author = {L\"uddecke, Timo and Ecker, Alexander}, + title = {Image Segmentation Using Text and Image Prompts}, + booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, + month = {June}, + year = {2022}, + pages = {7086-7096} +} + +``` diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/Tables.ipynb b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/Tables.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..4799d46e48eb153c7126bb13897a13a9856f138a --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/Tables.ipynb @@ -0,0 +1,349 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%load_ext autoreload\n", + "%autoreload 2\n", + "\n", + "import clip\n", + "from evaluation_utils import norm, denorm\n", + "from general_utils import *\n", + "from datasets.lvis_oneshot3 import LVIS_OneShot3, LVIS_OneShot" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# PhraseCut" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "pc = experiment('experiments/phrasecut.yaml', nums=':6').dataframe()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "tab1 = pc[['name', 'pc_miou_best', 'pc_fgiou_best', 'pc_ap']]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "cols = ['pc_miou_0.3', 'pc_fgiou_0.3', 'pc_ap']\n", + "tab1 = pc[['name'] + cols]\n", + "for k in cols:\n", + " tab1.loc[:, k] = (100 * tab1.loc[:, k]).round(1)\n", + "tab1.loc[:, 'name'] = ['CLIPSeg (PC+)', 'CLIPSeg (PC, $D=128$)', 'CLIPSeg (PC)', 'CLIP-Deconv', 'ViTSeg (PC+)', 'ViTSeg (PC)']\n", + "tab1.insert(1, 't', [0.3]*tab1.shape[0])\n", + "print(tab1.to_latex(header=False, index=False))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "For 0.1 threshold" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "cols = ['pc_miou_0.1', 'pc_fgiou_0.1', 'pc_ap']\n", + "tab1 = pc[['name'] + cols]\n", + "for k in cols:\n", + " tab1.loc[:, k] = (100 * tab1.loc[:, k]).round(1)\n", + "tab1.loc[:, 'name'] = ['CLIPSeg (PC+)', 'CLIPSeg (PC, $D=128$)', 'CLIPSeg (PC)', 'CLIP-Deconv', 'ViTSeg (PC+)', 'ViTSeg (PC)']\n", + "tab1.insert(1, 't', [0.1]*tab1.shape[0])\n", + "print(tab1.to_latex(header=False, index=False))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# One-shot" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Pascal" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "pas = experiment('experiments/pascal_1shot.yaml', nums=':19').dataframe()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "pas[['name', 'pas_h2_miou_0.3', 'pas_h2_biniou_0.3', 'pas_h2_ap', 'pas_h2_fgiou_ct']]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "pas = experiment('experiments/pascal_1shot.yaml', nums=':8').dataframe()\n", + "tab1 = pas[['pas_h2_miou_0.3', 'pas_h2_biniou_0.3', 'pas_h2_ap']]\n", + "print('CLIPSeg (PC+) & 0.3 & CLIP & ' + ' 
& '.join(f'{x*100:.1f}' for x in tab1[0:4].mean(0).values), '\\\\\\\\')\n", + "print('CLIPSeg (PC) & 0.3 & CLIP & ' + ' & '.join(f'{x*100:.1f}' for x in tab1[4:8].mean(0).values), '\\\\\\\\')\n", + "\n", + "pas = experiment('experiments/pascal_1shot.yaml', nums='12:16').dataframe()\n", + "tab1 = pas[['pas_h2_miou_0.2', 'pas_h2_biniou_0.2', 'pas_h2_ap']]\n", + "print('CLIP-Deconv (PC+) & 0.2 & CLIP & ' + ' & '.join(f'{x*100:.1f}' for x in tab1[0:4].mean(0).values), '\\\\\\\\')\n", + "\n", + "pas = experiment('experiments/pascal_1shot.yaml', nums='16:20').dataframe()\n", + "tab1 = pas[['pas_t_miou_0.2', 'pas_t_biniou_0.2', 'pas_t_ap']]\n", + "print('ViTSeg (PC+) & 0.2 & CLIP & ' + ' & '.join(f'{x*100:.1f}' for x in tab1[0:4].mean(0).values), '\\\\\\\\')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Pascal Zero-shot (in one-shot setting)\n", + "\n", + "Using the same setting as one-shot (hence different from the other zero-shot benchmark)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "pas = experiment('experiments/pascal_1shot.yaml', nums=':8').dataframe()\n", + "tab1 = pas[['pas_t_miou_0.3', 'pas_t_biniou_0.3', 'pas_t_ap']]\n", + "print('CLIPSeg (PC+) & 0.3 & CLIP & ' + ' & '.join(f'{x*100:.1f}' for x in tab1[0:4].mean(0).values), '\\\\\\\\')\n", + "print('CLIPSeg (PC) & 0.3 & CLIP & ' + ' & '.join(f'{x*100:.1f}' for x in tab1[4:8].mean(0).values), '\\\\\\\\')\n", + "\n", + "pas = experiment('experiments/pascal_1shot.yaml', nums='12:16').dataframe()\n", + "tab1 = pas[['pas_t_miou_0.3', 'pas_t_biniou_0.3', 'pas_t_ap']]\n", + "print('CLIP-Deconv (PC+) & 0.3 & CLIP & ' + ' & '.join(f'{x*100:.1f}' for x in tab1[0:4].mean(0).values), '\\\\\\\\')\n", + "\n", + "pas = experiment('experiments/pascal_1shot.yaml', nums='16:20').dataframe()\n", + "tab1 = pas[['pas_t_miou_0.2', 'pas_t_biniou_0.2', 'pas_t_ap']]\n", + "print('ViTSeg (PC+) & 0.2 & CLIP & ' + ' & '.join(f'{x*100:.1f}' for x in tab1[0:4].mean(0).values), '\\\\\\\\')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# without fixed thresholds...\n", + "\n", + "pas = experiment('experiments/pascal_1shot.yaml', nums=':8').dataframe()\n", + "tab1 = pas[['pas_t_best_miou', 'pas_t_best_biniou', 'pas_t_ap']]\n", + "print('CLIPSeg (PC+) & CLIP & ' + ' & '.join(f'{x*100:.1f}' for x in tab1[0:4].mean(0).values), '\\\\\\\\')\n", + "print('CLIPSeg (PC) & CLIP & ' + ' & '.join(f'{x*100:.1f}' for x in tab1[4:8].mean(0).values), '\\\\\\\\')\n", + "\n", + "pas = experiment('experiments/pascal_1shot.yaml', nums='12:16').dataframe()\n", + "tab1 = pas[['pas_t_best_miou', 'pas_t_best_biniou', 'pas_t_ap']]\n", + "print('CLIP-Deconv (PC+) & CLIP & ' + ' & '.join(f'{x*100:.1f}' for x in tab1[0:4].mean(0).values), '\\\\\\\\')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### COCO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "coco = experiment('experiments/coco.yaml', nums=':29').dataframe()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "tab1 = coco[['coco_h2_miou_0.1', 'coco_h2_biniou_0.1', 'coco_h2_ap']]\n", + "tab2 = coco[['coco_h2_miou_0.2', 'coco_h2_biniou_0.2', 'coco_h2_ap']]\n", + "tab3 = coco[['coco_h2_miou_best', 'coco_h2_biniou_best', 'coco_h2_ap']]\n", + "print('CLIPSeg (COCO) & 0.1 & CLIP & ' + ' & '.join(f'{x*100:.1f}' 
for x in tab1[:4].mean(0).values), '\\\\\\\\')\n", + "print('CLIPSeg (COCO+N) & 0.1 & CLIP & ' + ' & '.join(f'{x*100:.1f}' for x in tab1[4:8].mean(0).values), '\\\\\\\\')\n", + "print('CLIP-Deconv (COCO+N) & 0.1 & CLIP & ' + ' & '.join(f'{x*100:.1f}' for x in tab1[12:16].mean(0).values), '\\\\\\\\')\n", + "print('ViTSeg (COCO) & 0.1 & CLIP & ' + ' & '.join(f'{x*100:.1f}' for x in tab1[8:12].mean(0).values), '\\\\\\\\')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Zero-shot" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "zs = experiment('experiments/pascal_0shot.yaml', nums=':11').dataframe()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "tab1 = zs[['pas_zs_seen', 'pas_zs_unseen']]\n", + "print('CLIPSeg (PC+) & CLIP & ' + ' & '.join(f'{x*100:.1f}' for x in tab1[8:9].values[0].tolist() + tab1[10:11].values[0].tolist()), '\\\\\\\\')\n", + "print('CLIP-Deconv & CLIP & ' + ' & '.join(f'{x*100:.1f}' for x in tab1[2:3].values[0].tolist() + tab1[3:4].values[0].tolist()), '\\\\\\\\')\n", + "print('ViTSeg & ImageNet-1K & ' + ' & '.join(f'{x*100:.1f}' for x in tab1[4:5].values[0].tolist() + tab1[5:6].values[0].tolist()), '\\\\\\\\')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Ablation" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ablation = experiment('experiments/ablation.yaml', nums=':8').dataframe()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "tab1 = ablation[['name', 'pc_miou_best', 'pc_ap', 'pc-vis_miou_best', 'pc-vis_ap']]\n", + "for k in ['pc_miou_best', 'pc_ap', 'pc-vis_miou_best', 'pc-vis_ap']:\n", + " tab1.loc[:, k] = (100 * tab1.loc[:, k]).round(1)\n", + "tab1.loc[:, 'name'] = ['CLIPSeg', 'no CLIP pre-training', 'no-negatives', '50% negatives', 'no visual', '$D=16$', 'only layer 3', 'highlight mask']" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(tab1.loc[[0,1,4,5,6,7],:].to_latex(header=False, index=False))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(tab1.loc[[0,1,4,5,6,7],:].to_latex(header=False, index=False))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Generalization" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "generalization = experiment('experiments/generalize.yaml').dataframe()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "gen = generalization[['aff_best_fgiou', 'aff_ap', 'ability_best_fgiou', 'ability_ap', 'part_best_fgiou', 'part_ap']].values" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\n", + " 'CLIPSeg (PC+) & ' + ' & '.join(f'{x*100:.1f}' for x in gen[1]) + ' \\\\\\\\ \\n' + \\\n", + " 'CLIPSeg (LVIS) & ' + ' & '.join(f'{x*100:.1f}' for x in gen[0]) + ' \\\\\\\\ \\n' + \\\n", + " 'CLIP-Deconv & ' + ' & '.join(f'{x*100:.1f}' for x in gen[2]) + ' \\\\\\\\ \\n' + \\\n", + " 'VITSeg & ' + ' & '.join(f'{x*100:.1f}' for x in gen[3]) + ' \\\\\\\\'\n", + ")" + ] + } + ], + "metadata": { + "interpreter": { + "hash": 
"800ed241f7db2bd3aa6942aa3be6809cdb30ee6b0a9e773dfecfa9fef1f4c586" + }, + "kernelspec": { + "display_name": "env2", + "language": "python", + "name": "env2" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.8" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/Visual_Feature_Engineering.ipynb b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/Visual_Feature_Engineering.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..23c108c9e80862483c2dd34fbb4e0d00bb64e9a1 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/Visual_Feature_Engineering.ipynb @@ -0,0 +1,366 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Systematic" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%load_ext autoreload\n", + "%autoreload 2\n", + "\n", + "import clip\n", + "from evaluation_utils import norm, denorm\n", + "from general_utils import *\n", + "from datasets.lvis_oneshot3 import LVIS_OneShot3\n", + "\n", + "clip_device = 'cuda'\n", + "clip_model, preprocess = clip.load(\"ViT-B/16\", device=clip_device)\n", + "clip_model.eval();\n", + "\n", + "from models.clipseg import CLIPDensePredTMasked\n", + "\n", + "clip_mask_model = CLIPDensePredTMasked(version='ViT-B/16').to(clip_device)\n", + "clip_mask_model.eval();" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "lvis = LVIS_OneShot3('train_fixed', mask='separate', normalize=True, with_class_label=True, add_bar=False, \n", + " text_class_labels=True, image_size=352, min_area=0.1,\n", + " min_frac_s=0.05, min_frac_q=0.05, fix_find_crop=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "plot_data(lvis)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from collections import defaultdict\n", + "import json\n", + "\n", + "lvis_raw = json.load(open(expanduser('~/datasets/LVIS/lvis_v1_train.json')))\n", + "lvis_val_raw = json.load(open(expanduser('~/datasets/LVIS/lvis_v1_val.json')))\n", + "\n", + "objects_per_image = defaultdict(lambda : set())\n", + "for ann in lvis_raw['annotations']:\n", + " objects_per_image[ann['image_id']].add(ann['category_id'])\n", + " \n", + "for ann in lvis_val_raw['annotations']:\n", + " objects_per_image[ann['image_id']].add(ann['category_id']) \n", + " \n", + "objects_per_image = {o: [lvis.category_names[o] for o in v] for o, v in objects_per_image.items()}\n", + "\n", + "del lvis_raw, lvis_val_raw" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#bs = 32\n", + "#batches = [get_batch(lvis, i*bs, (i+1)*bs, cuda=True) for i in range(10)]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from general_utils import get_batch\n", + "from functools import partial\n", + "from evaluation_utils import img_preprocess\n", + "import torch\n", + "\n", + "def get_similarities(batches_or_dataset, process, mask=lambda x: None, clipmask=False):\n", + "\n", + " # 
base_words = [f'a photo of {x}' for x in ['a person', 'an animal', 'a knife', 'a cup']]\n", + "\n", + " all_prompts = []\n", + " \n", + " with torch.no_grad():\n", + " valid_sims = []\n", + " torch.manual_seed(571)\n", + " \n", + " if type(batches_or_dataset) == list:\n", + " loader = batches_or_dataset # already loaded\n", + " max_iter = float('inf')\n", + " else:\n", + " loader = DataLoader(batches_or_dataset, shuffle=False, batch_size=32)\n", + " max_iter = 50\n", + " \n", + " global batch\n", + " for i_batch, (batch, batch_y) in enumerate(loader):\n", + " \n", + " if i_batch >= max_iter: break\n", + " \n", + " processed_batch = process(batch)\n", + " if type(processed_batch) == dict:\n", + " \n", + " # processed_batch = {k: v.to(clip_device) for k, v in processed_batch.items()}\n", + " image_features = clip_mask_model.visual_forward(**processed_batch)[0].to(clip_device).half()\n", + " else:\n", + " processed_batch = process(batch).to(clip_device)\n", + " processed_batch = nnf.interpolate(processed_batch, (224, 224), mode='bilinear')\n", + " #image_features = clip_model.encode_image(processed_batch.to(clip_device)) \n", + " image_features = clip_mask_model.visual_forward(processed_batch)[0].to(clip_device).half()\n", + " \n", + " image_features = image_features / image_features.norm(dim=-1, keepdim=True)\n", + " bs = len(batch[0])\n", + " for j in range(bs):\n", + " \n", + " c, _, sid, qid = lvis.sample_ids[bs * i_batch + j]\n", + " support_image = basename(lvis.samples[c][sid])\n", + " \n", + " img_objs = [o for o in objects_per_image[int(support_image)]]\n", + " img_objs = [o.replace('_', ' ') for o in img_objs]\n", + " \n", + " other_words = [f'a photo of a {o.replace(\"_\", \" \")}' for o in img_objs \n", + " if o != batch_y[2][j]]\n", + " \n", + " prompts = [f'a photo of a {batch_y[2][j]}'] + other_words\n", + " all_prompts += [prompts]\n", + " \n", + " text_cond = clip_model.encode_text(clip.tokenize(prompts).to(clip_device))\n", + " text_cond = text_cond / text_cond.norm(dim=-1, keepdim=True) \n", + "\n", + " global logits\n", + " logits = clip_model.logit_scale.exp() * image_features[j] @ text_cond.T\n", + "\n", + " global sim\n", + " sim = torch.softmax(logits, dim=-1)\n", + " \n", + " valid_sims += [sim]\n", + " \n", + " #valid_sims = torch.stack(valid_sims)\n", + " return valid_sims, all_prompts\n", + " \n", + "\n", + "def new_img_preprocess(x):\n", + " return {'x_inp': x[1], 'mask': (11, 'cls_token', x[2])}\n", + " \n", + "#get_similarities(lvis, partial(img_preprocess, center_context=0.5));\n", + "get_similarities(lvis, lambda x: x[1]);" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "preprocessing_functions = [\n", + "# ['clip mask CLS L11', lambda x: {'x_inp': x[1].cuda(), 'mask': (11, 'cls_token', x[2].cuda())}],\n", + "# ['clip mask CLS all', lambda x: {'x_inp': x[1].cuda(), 'mask': ('all', 'cls_token', x[2].cuda())}],\n", + "# ['clip mask all all', lambda x: {'x_inp': x[1].cuda(), 'mask': ('all', 'all', x[2].cuda())}],\n", + "# ['colorize object red', partial(img_preprocess, colorize=True)],\n", + "# ['add red outline', partial(img_preprocess, outline=True)],\n", + " \n", + "# ['BG brightness 50%', partial(img_preprocess, bg_fac=0.5)],\n", + "# ['BG brightness 10%', partial(img_preprocess, bg_fac=0.1)],\n", + "# ['BG brightness 0%', partial(img_preprocess, bg_fac=0.0)],\n", + "# ['BG blur', partial(img_preprocess, blur=3)],\n", + "# ['BG blur & intensity 10%', partial(img_preprocess, blur=3, 
bg_fac=0.1)],\n", + " \n", + "# ['crop large context', partial(img_preprocess, center_context=0.5)],\n", + "# ['crop small context', partial(img_preprocess, center_context=0.1)],\n", + " ['crop & background blur', partial(img_preprocess, blur=3, center_context=0.5)],\n", + " ['crop & intensity 10%', partial(img_preprocess, blur=3, bg_fac=0.1)],\n", + "# ['crop & background blur & intensity 10%', partial(img_preprocess, blur=3, center_context=0.1, bg_fac=0.1)],\n", + "]\n", + "\n", + "preprocessing_functions = preprocessing_functions\n", + "\n", + "base, base_p = get_similarities(lvis, lambda x: x[1])\n", + "outs = [get_similarities(lvis, fun) for _, fun in preprocessing_functions]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "outs2 = [get_similarities(lvis, fun) for _, fun in [['BG brightness 0%', partial(img_preprocess, bg_fac=0.0)]]]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for j in range(1):\n", + " print(np.mean([outs2[j][0][i][0].cpu() - base[i][0].cpu() for i in range(len(base)) if len(base_p[i]) >= 3]))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from pandas import DataFrame\n", + "tab = dict()\n", + "for j, (name, _) in enumerate(preprocessing_functions):\n", + " tab[name] = np.mean([outs[j][0][i][0].cpu() - base[i][0].cpu() for i in range(len(base)) if len(base_p[i]) >= 3])\n", + " \n", + " \n", + "print('\\n'.join(f'{k} & {v*100:.2f} \\\\\\\\' for k,v in tab.items())) " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Visual" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from evaluation_utils import denorm, norm" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def load_sample(filename, filename2):\n", + " from os.path import join\n", + " bp = expanduser('~/cloud/resources/sample_images')\n", + " tf = transforms.Compose([\n", + " transforms.ToTensor(),\n", + " transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n", + " transforms.Resize(224),\n", + " transforms.CenterCrop(224)\n", + " ])\n", + " tf2 = transforms.Compose([\n", + " transforms.ToTensor(),\n", + " transforms.Resize(224),\n", + " transforms.CenterCrop(224)\n", + " ])\n", + " inp1 = [None, tf(Image.open(join(bp, filename))), tf2(Image.open(join(bp, filename2)))]\n", + " inp1[1] = inp1[1].unsqueeze(0)\n", + " inp1[2] = inp1[2][:1] \n", + " return inp1\n", + "\n", + "def all_preprocessing(inp1):\n", + " return [\n", + " img_preprocess(inp1),\n", + " img_preprocess(inp1, colorize=True),\n", + " img_preprocess(inp1, outline=True), \n", + " img_preprocess(inp1, blur=3),\n", + " img_preprocess(inp1, bg_fac=0.1),\n", + " #img_preprocess(inp1, bg_fac=0.5),\n", + " #img_preprocess(inp1, blur=3, bg_fac=0.5), \n", + " img_preprocess(inp1, blur=3, bg_fac=0.5, center_context=0.5),\n", + " ]\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from torchvision import transforms\n", + "from PIL import Image\n", + "from matplotlib import pyplot as plt\n", + "from evaluation_utils import img_preprocess\n", + "import clip\n", + "\n", + "images_queries = [\n", + " [load_sample('things1.jpg', 'things1_jar.png'), ['jug', 'knife', 'car', 'animal', 'sieve', 
'nothing']],\n", + " [load_sample('own_photos/IMG_2017s_square.jpg', 'own_photos/IMG_2017s_square_trash_can.png'), ['trash bin', 'house', 'car', 'bike', 'window', 'nothing']],\n", + "]\n", + "\n", + "\n", + "_, ax = plt.subplots(2 * len(images_queries), 6, figsize=(14, 4.5 * len(images_queries)))\n", + "\n", + "for j, (images, objects) in enumerate(images_queries):\n", + " \n", + " joint_image = all_preprocessing(images)\n", + " \n", + " joint_image = torch.stack(joint_image)[:,0]\n", + " clip_model, preprocess = clip.load(\"ViT-B/16\", device='cpu')\n", + " image_features = clip_model.encode_image(joint_image)\n", + " image_features = image_features / image_features.norm(dim=-1, keepdim=True)\n", + " \n", + " prompts = [f'a photo of a {obj}'for obj in objects]\n", + " text_cond = clip_model.encode_text(clip.tokenize(prompts))\n", + " text_cond = text_cond / text_cond.norm(dim=-1, keepdim=True)\n", + " logits = clip_model.logit_scale.exp() * image_features @ text_cond.T\n", + " sim = torch.softmax(logits, dim=-1).detach().cpu()\n", + "\n", + " for i, img in enumerate(joint_image):\n", + " ax[2*j, i].axis('off')\n", + " \n", + " ax[2*j, i].imshow(torch.clamp(denorm(joint_image[i]).permute(1,2,0), 0, 1))\n", + " ax[2*j+ 1, i].grid(True)\n", + " \n", + " ax[2*j + 1, i].set_ylim(0,1)\n", + " ax[2*j + 1, i].set_yticklabels([])\n", + " ax[2*j + 1, i].set_xticks([]) # set_xticks(range(len(prompts)))\n", + "# ax[1, i].set_xticklabels(objects, rotation=90)\n", + " for k in range(len(sim[i])):\n", + " ax[2*j + 1, i].bar(k, sim[i][k], color=plt.cm.tab20(1) if k!=0 else plt.cm.tab20(3))\n", + " ax[2*j + 1, i].text(k, 0.07, objects[k], rotation=90, ha='center', fontsize=15)\n", + "\n", + "plt.tight_layout()\n", + "plt.savefig('figures/prompt_engineering.pdf', bbox_inches='tight')" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "env2", + "language": "python", + "name": "env2" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.8" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/datasets/coco_wrapper.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/datasets/coco_wrapper.py new file mode 100644 index 0000000000000000000000000000000000000000..f88d98bb41ee23682a6aaea75a50a3b61e569304 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/datasets/coco_wrapper.py @@ -0,0 +1,99 @@ +import pickle +from types import new_class +import torch +import numpy as np +import os +import json + +from os.path import join, dirname, isdir, isfile, expanduser, realpath, basename +from random import shuffle, seed as set_seed +from PIL import Image + +from itertools import combinations +from torchvision import transforms +from torchvision.transforms.transforms import Resize + +from datasets.utils import blend_image_segmentation +from general_utils import get_from_repository + +COCO_CLASSES = {0: 'person', 1: 'bicycle', 2: 'car', 3: 'motorcycle', 4: 'airplane', 5: 'bus', 6: 'train', 7: 'truck', 8: 'boat', 9: 'traffic light', 10: 'fire hydrant', 11: 'stop sign', 12: 'parking meter', 13: 'bench', 14: 'bird', 15: 'cat', 16: 'dog', 17: 'horse', 18: 'sheep', 19: 'cow', 20: 'elephant', 21: 'bear', 22: 'zebra', 23: 'giraffe', 24: 'backpack', 25: 'umbrella', 26: 'handbag', 
27: 'tie', 28: 'suitcase', 29: 'frisbee', 30: 'skis', 31: 'snowboard', 32: 'sports ball', 33: 'kite', 34: 'baseball bat', 35: 'baseball glove', 36: 'skateboard', 37: 'surfboard', 38: 'tennis racket', 39: 'bottle', 40: 'wine glass', 41: 'cup', 42: 'fork', 43: 'knife', 44: 'spoon', 45: 'bowl', 46: 'banana', 47: 'apple', 48: 'sandwich', 49: 'orange', 50: 'broccoli', 51: 'carrot', 52: 'hot dog', 53: 'pizza', 54: 'donut', 55: 'cake', 56: 'chair', 57: 'couch', 58: 'potted plant', 59: 'bed', 60: 'dining table', 61: 'toilet', 62: 'tv', 63: 'laptop', 64: 'mouse', 65: 'remote', 66: 'keyboard', 67: 'cell phone', 68: 'microwave', 69: 'oven', 70: 'toaster', 71: 'sink', 72: 'refrigerator', 73: 'book', 74: 'clock', 75: 'vase', 76: 'scissors', 77: 'teddy bear', 78: 'hair drier', 79: 'toothbrush'} + +class COCOWrapper(object): + + def __init__(self, split, fold=0, image_size=400, aug=None, mask='separate', negative_prob=0, + with_class_label=False): + super().__init__() + + self.mask = mask + self.with_class_label = with_class_label + self.negative_prob = negative_prob + + from third_party.hsnet.data.coco import DatasetCOCO + + get_from_repository('COCO-20i', ['COCO-20i.tar']) + + foldpath = join(dirname(__file__), '../third_party/hsnet/data/splits/coco/%s/fold%d.pkl') + + def build_img_metadata_classwise(self): + with open(foldpath % (self.split, self.fold), 'rb') as f: + img_metadata_classwise = pickle.load(f) + return img_metadata_classwise + + + DatasetCOCO.build_img_metadata_classwise = build_img_metadata_classwise + # DatasetCOCO.read_mask = read_mask + + mean = [0.485, 0.456, 0.406] + std = [0.229, 0.224, 0.225] + transform = transforms.Compose([ + transforms.Resize((image_size, image_size)), + transforms.ToTensor(), + transforms.Normalize(mean, std) + ]) + + self.coco = DatasetCOCO(expanduser('~/datasets/COCO-20i/'), fold, transform, split, 1, False) + + self.all_classes = [self.coco.class_ids] + self.coco.base_path = join(expanduser('~/datasets/COCO-20i')) + + def __len__(self): + return len(self.coco) + + def __getitem__(self, i): + sample = self.coco[i] + + label_name = COCO_CLASSES[int(sample['class_id'])] + + img_s, seg_s = sample['support_imgs'][0], sample['support_masks'][0] + + if self.negative_prob > 0 and torch.rand(1).item() < self.negative_prob: + new_class_id = sample['class_id'] + while new_class_id == sample['class_id']: + sample2 = self.coco[torch.randint(0, len(self), (1,)).item()] + new_class_id = sample2['class_id'] + img_s = sample2['support_imgs'][0] + seg_s = torch.zeros_like(seg_s) + + mask = self.mask + if mask == 'separate': + supp = (img_s, seg_s) + elif mask == 'text_label': + # DEPRECATED + supp = [int(sample['class_id'])] + elif mask == 'text': + supp = [label_name] + else: + if mask.startswith('text_and_'): + mask = mask[9:] + label_add = [label_name] + else: + label_add = [] + + supp = label_add + blend_image_segmentation(img_s, seg_s, mode=mask) + + if self.with_class_label: + label = (torch.zeros(0), sample['class_id'],) + else: + label = (torch.zeros(0), ) + + return (sample['query_img'],) + tuple(supp), (sample['query_mask'].unsqueeze(0),) + label \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/datasets/pascal_classes.json b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/datasets/pascal_classes.json new file mode 100644 index 0000000000000000000000000000000000000000..1d8ad2b9ff453a88af7d50c412fd291ec6567644 --- /dev/null +++ 
b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/datasets/pascal_classes.json @@ -0,0 +1 @@ +[{"id": 1, "synonyms": ["aeroplane"]}, {"id": 2, "synonyms": ["bicycle"]}, {"id": 3, "synonyms": ["bird"]}, {"id": 4, "synonyms": ["boat"]}, {"id": 5, "synonyms": ["bottle"]}, {"id": 6, "synonyms": ["bus"]}, {"id": 7, "synonyms": ["car"]}, {"id": 8, "synonyms": ["cat"]}, {"id": 9, "synonyms": ["chair"]}, {"id": 10, "synonyms": ["cow"]}, {"id": 11, "synonyms": ["diningtable"]}, {"id": 12, "synonyms": ["dog"]}, {"id": 13, "synonyms": ["horse"]}, {"id": 14, "synonyms": ["motorbike"]}, {"id": 15, "synonyms": ["person"]}, {"id": 16, "synonyms": ["pottedplant"]}, {"id": 17, "synonyms": ["sheep"]}, {"id": 18, "synonyms": ["sofa"]}, {"id": 19, "synonyms": ["train"]}, {"id": 20, "synonyms": ["tvmonitor"]}] \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/datasets/pascal_zeroshot.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/datasets/pascal_zeroshot.py new file mode 100644 index 0000000000000000000000000000000000000000..3fa84de9049bf272538f97b408bed07a9e9b5478 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/datasets/pascal_zeroshot.py @@ -0,0 +1,60 @@ +from os.path import expanduser +import torch +import json +import torchvision +from general_utils import get_from_repository +from general_utils import log +from torchvision import transforms + +PASCAL_VOC_CLASSES_ZS = [['cattle.n.01', 'motorcycle.n.01'], ['aeroplane.n.01', 'sofa.n.01'], + ['cat.n.01', 'television.n.03'], ['train.n.01', 'bottle.n.01'], + ['chair.n.01', 'pot_plant.n.01']] + + +class PascalZeroShot(object): + + def __init__(self, split, n_unseen, image_size=224) -> None: + super().__init__() + + import sys + sys.path.append('third_party/JoEm') + from third_party.JoEm.data_loader.dataset import VOCSegmentation + from third_party.JoEm.data_loader import get_seen_idx, get_unseen_idx, VOC + + self.pascal_classes = VOC + self.image_size = image_size + + self.transform = transforms.Compose([ + transforms.Resize((image_size, image_size)), + ]) + + if split == 'train': + self.voc = VOCSegmentation(get_unseen_idx(n_unseen), get_seen_idx(n_unseen), + split=split, transform=True, transform_args=dict(base_size=312, crop_size=312), + ignore_bg=False, ignore_unseen=False, remv_unseen_img=True) + elif split == 'val': + self.voc = VOCSegmentation(get_unseen_idx(n_unseen), get_seen_idx(n_unseen), + split=split, transform=False, + ignore_bg=False, ignore_unseen=False) + + self.unseen_idx = get_unseen_idx(n_unseen) + + def __len__(self): + return len(self.voc) + + def __getitem__(self, i): + + sample = self.voc[i] + label = sample['label'].long() + all_labels = [l for l in torch.where(torch.bincount(label.flatten())>0)[0].numpy().tolist() if l != 255] + class_indices = [l for l in all_labels] + class_names = [self.pascal_classes[l] for l in all_labels] + + image = self.transform(sample['image']) + + label = transforms.Resize((self.image_size, self.image_size), + interpolation=torchvision.transforms.InterpolationMode.NEAREST)(label.unsqueeze(0))[0] + + return (image,), (label, ) + + diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/datasets/pfe_dataset.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/datasets/pfe_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..83988dea963a2c4226010a336573de94bf06c55e --- 
/dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/datasets/pfe_dataset.py @@ -0,0 +1,129 @@ +from os.path import expanduser +import torch +import json +from general_utils import get_from_repository +from datasets.lvis_oneshot3 import blend_image_segmentation +from general_utils import log + +PASCAL_CLASSES = {a['id']: a['synonyms'] for a in json.load(open('datasets/pascal_classes.json'))} + + +class PFEPascalWrapper(object): + + def __init__(self, mode, split, mask='separate', image_size=473, label_support=None, size=None, p_negative=0, aug=None): + import sys + # sys.path.append(expanduser('~/projects/new_one_shot')) + from third_party.PFENet.util.dataset import SemData + + get_from_repository('PascalVOC2012', ['Pascal5i.tar']) + + self.p_negative = p_negative + self.size = size + self.mode = mode + self.image_size = image_size + + if label_support in {True, False}: + log.warning('label_support argument is deprecated. Use mask instead.') + #raise ValueError() + + self.mask = mask + + value_scale = 255 + mean = [0.485, 0.456, 0.406] + mean = [item * value_scale for item in mean] + std = [0.229, 0.224, 0.225] + std = [item * value_scale for item in std] + + import third_party.PFENet.util.transform as transform + + if mode == 'val': + data_list = expanduser('~/projects/old_one_shot/PFENet/lists/pascal/val.txt') + + data_transform = [transform.test_Resize(size=image_size)] if image_size != 'original' else [] + data_transform += [ + transform.ToTensor(), + transform.Normalize(mean=mean, std=std) + ] + + + elif mode == 'train': + data_list = expanduser('~/projects/old_one_shot/PFENet/lists/pascal/voc_sbd_merge_noduplicate.txt') + + assert image_size != 'original' + + data_transform = [ + transform.RandScale([0.9, 1.1]), + transform.RandRotate([-10, 10], padding=mean, ignore_label=255), + transform.RandomGaussianBlur(), + transform.RandomHorizontalFlip(), + transform.Crop((image_size, image_size), crop_type='rand', padding=mean, ignore_label=255), + transform.ToTensor(), + transform.Normalize(mean=mean, std=std) + ] + + data_transform = transform.Compose(data_transform) + + self.dataset = SemData(split=split, mode=mode, data_root=expanduser('~/datasets/PascalVOC2012/VOC2012'), + data_list=data_list, shot=1, transform=data_transform, use_coco=False, use_split_coco=False) + + self.class_list = self.dataset.sub_val_list if mode == 'val' else self.dataset.sub_list + + # verify that subcls_list always has length 1 + # assert len(set([len(d[4]) for d in self.dataset])) == 1 + + print('actual length', len(self.dataset.data_list)) + + def __len__(self): + if self.mode == 'val': + return len(self.dataset.data_list) + else: + return len(self.dataset.data_list) + + def __getitem__(self, index): + if self.dataset.mode == 'train': + image, label, s_x, s_y, subcls_list = self.dataset[index % len(self.dataset.data_list)] + elif self.dataset.mode == 'val': + image, label, s_x, s_y, subcls_list, ori_label = self.dataset[index % len(self.dataset.data_list)] + ori_label = torch.from_numpy(ori_label).unsqueeze(0) + + if self.image_size != 'original': + longerside = max(ori_label.size(1), ori_label.size(2)) + backmask = torch.ones(ori_label.size(0), longerside, longerside).cuda()*255 + backmask[0, :ori_label.size(1), :ori_label.size(2)] = ori_label + label = backmask.clone().long() + else: + label = label.unsqueeze(0) + + # assert label.shape == (473, 473) + + if self.p_negative > 0: + if torch.rand(1).item() < self.p_negative: + while True: + idx = torch.randint(0, 
len(self.dataset.data_list), (1,)).item() + _, _, s_x, s_y, subcls_list_tmp, _ = self.dataset[idx] + if subcls_list[0] != subcls_list_tmp[0]: + break + + s_x = s_x[0] + s_y = (s_y == 1)[0] + label_fg = (label == 1).float() + val_mask = (label != 255).float() + + class_id = self.class_list[subcls_list[0]] + + label_name = PASCAL_CLASSES[class_id][0] + label_add = () + mask = self.mask + + if mask == 'text': + support = ('a photo of a ' + label_name + '.',) + elif mask == 'separate': + support = (s_x, s_y) + else: + if mask.startswith('text_and_'): + label_add = (label_name,) + mask = mask[9:] + + support = (blend_image_segmentation(s_x, s_y.float(), mask)[0],) + + return (image,) + label_add + support, (label_fg.unsqueeze(0), val_mask.unsqueeze(0), subcls_list[0]) diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/datasets/phrasecut.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/datasets/phrasecut.py new file mode 100644 index 0000000000000000000000000000000000000000..ef0c5350583c33c64682a35af3d314b02831569c --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/datasets/phrasecut.py @@ -0,0 +1,335 @@ + +import torch +import numpy as np +import os + +from os.path import join, isdir, isfile, expanduser +from PIL import Image + +from torchvision import transforms +from torchvision.transforms.transforms import Resize + +from torch.nn import functional as nnf +from general_utils import get_from_repository + +from skimage.draw import polygon2mask + + + +def random_crop_slices(origin_size, target_size): + """Gets slices of a random crop. """ + assert origin_size[0] >= target_size[0] and origin_size[1] >= target_size[1], f'actual size: {origin_size}, target size: {target_size}' + + offset_y = torch.randint(0, origin_size[0] - target_size[0] + 1, (1,)).item() # range: 0 <= value < high + offset_x = torch.randint(0, origin_size[1] - target_size[1] + 1, (1,)).item() + + return slice(offset_y, offset_y + target_size[0]), slice(offset_x, offset_x + target_size[1]) + + +def find_crop(seg, image_size, iterations=1000, min_frac=None, best_of=None): + + + best_crops = [] + best_crop_not_ok = float('-inf'), None, None + min_sum = 0 + + seg = seg.astype('bool') + + if min_frac is not None: + #min_sum = seg.sum() * min_frac + min_sum = seg.shape[0] * seg.shape[1] * min_frac + + for iteration in range(iterations): + sl_y, sl_x = random_crop_slices(seg.shape, image_size) + seg_ = seg[sl_y, sl_x] + sum_seg_ = seg_.sum() + + if sum_seg_ > min_sum: + + if best_of is None: + return sl_y, sl_x, False + else: + best_crops += [(sum_seg_, sl_y, sl_x)] + if len(best_crops) >= best_of: + best_crops.sort(key=lambda x:x[0], reverse=True) + sl_y, sl_x = best_crops[0][1:] + + return sl_y, sl_x, False + + else: + if sum_seg_ > best_crop_not_ok[0]: + best_crop_not_ok = sum_seg_, sl_y, sl_x + + else: + # return best segmentation found + return best_crop_not_ok[1:] + (best_crop_not_ok[0] <= min_sum,) + + +class PhraseCut(object): + + def __init__(self, split, image_size=400, negative_prob=0, aug=None, aug_color=False, aug_crop=True, + min_size=0, remove_classes=None, with_visual=False, only_visual=False, mask=None): + super().__init__() + + self.negative_prob = negative_prob + self.image_size = image_size + self.with_visual = with_visual + self.only_visual = only_visual + self.phrase_form = '{}' + self.mask = mask + self.aug_crop = aug_crop + + if aug_color: + self.aug_color = transforms.Compose([ + transforms.ColorJitter(0.5, 
0.5, 0.2, 0.05), + ]) + else: + self.aug_color = None + + get_from_repository('PhraseCut', ['PhraseCut.tar'], integrity_check=lambda local_dir: all([ + isdir(join(local_dir, 'VGPhraseCut_v0')), + isdir(join(local_dir, 'VGPhraseCut_v0', 'images')), + isfile(join(local_dir, 'VGPhraseCut_v0', 'refer_train.json')), + len(os.listdir(join(local_dir, 'VGPhraseCut_v0', 'images'))) in {108250, 108249} + ])) + + from third_party.PhraseCutDataset.utils.refvg_loader import RefVGLoader + self.refvg_loader = RefVGLoader(split=split) + + # img_ids where the size in the annotations does not match actual size + invalid_img_ids = set([150417, 285665, 498246, 61564, 285743, 498269, 498010, 150516, 150344, 286093, 61530, + 150333, 286065, 285814, 498187, 285761, 498042]) + + mean = [0.485, 0.456, 0.406] + std = [0.229, 0.224, 0.225] + self.normalize = transforms.Normalize(mean, std) + + self.sample_ids = [(i, j) + for i in self.refvg_loader.img_ids + for j in range(len(self.refvg_loader.get_img_ref_data(i)['phrases'])) + if i not in invalid_img_ids] + + + # self.all_phrases = list(set([p for i in self.refvg_loader.img_ids for p in self.refvg_loader.get_img_ref_data(i)['phrases']])) + + from nltk.stem import WordNetLemmatizer + wnl = WordNetLemmatizer() + + # Filter by class (if remove_classes is set) + if remove_classes is None: + pass + else: + from datasets.generate_lvis_oneshot import PASCAL_SYNSETS, traverse_lemmas, traverse_lemmas_hypo + from nltk.corpus import wordnet + + print('remove pascal classes...') + + get_data = self.refvg_loader.get_img_ref_data # shortcut + keep_sids = None + + if remove_classes[0] == 'pas5i': + subset_id = remove_classes[1] + from datasets.generate_lvis_oneshot import PASCAL_5I_SYNSETS_ORDERED, PASCAL_5I_CLASS_IDS + avoid = [PASCAL_5I_SYNSETS_ORDERED[i] for i in range(20) if i+1 not in PASCAL_5I_CLASS_IDS[subset_id]] + + + elif remove_classes[0] == 'zs': + stop = remove_classes[1] + + from datasets.pascal_zeroshot import PASCAL_VOC_CLASSES_ZS + + avoid = [c for class_set in PASCAL_VOC_CLASSES_ZS[:stop] for c in class_set] + print(avoid) + + elif remove_classes[0] == 'aff': + # avoid = ['drink.v.01', 'sit.v.01', 'ride.v.02'] + # all_lemmas = set(['drink', 'sit', 'ride']) + avoid = ['drink', 'drinks', 'drinking', 'sit', 'sits', 'sitting', + 'ride', 'rides', 'riding', + 'fly', 'flies', 'flying', 'drive', 'drives', 'driving', 'driven', + 'swim', 'swims', 'swimming', + 'wheels', 'wheel', 'legs', 'leg', 'ear', 'ears'] + keep_sids = [(i, j) for i, j in self.sample_ids if + all(x not in avoid for x in get_data(i)['phrases'][j].split(' '))] + + print('avoid classes:', avoid) + + + if keep_sids is None: + all_lemmas = [s for ps in avoid for s in traverse_lemmas_hypo(wordnet.synset(ps), max_depth=None)] + all_lemmas = list(set(all_lemmas)) + all_lemmas = [h.replace('_', ' ').lower() for h in all_lemmas] + all_lemmas = set(all_lemmas) + + # divide into multi word and single word + all_lemmas_s = set(l for l in all_lemmas if ' ' not in l) + all_lemmas_m = set(l for l in all_lemmas if l not in all_lemmas_s) + + # new3 + phrases = [get_data(i)['phrases'][j] for i, j in self.sample_ids] + remove_sids = set((i,j) for (i,j), phrase in zip(self.sample_ids, phrases) + if any(l in phrase for l in all_lemmas_m) or + len(set(wnl.lemmatize(w) for w in phrase.split(' ')).intersection(all_lemmas_s)) > 0 + ) + keep_sids = [(i, j) for i, j in self.sample_ids if (i,j) not in remove_sids] + + print(f'Reduced to {len(keep_sids) / len(self.sample_ids):.3f}') + removed_ids = set(self.sample_ids) - 
set(keep_sids) + + print('Examples of removed', len(removed_ids)) + for i, j in list(removed_ids)[:20]: + print(i, get_data(i)['phrases'][j]) + + self.sample_ids = keep_sids + + from itertools import groupby + samples_by_phrase = [(self.refvg_loader.get_img_ref_data(i)['phrases'][j], (i, j)) + for i, j in self.sample_ids] + samples_by_phrase = sorted(samples_by_phrase) + samples_by_phrase = groupby(samples_by_phrase, key=lambda x: x[0]) + + self.samples_by_phrase = {prompt: [s[1] for s in prompt_sample_ids] for prompt, prompt_sample_ids in samples_by_phrase} + + self.all_phrases = list(set(self.samples_by_phrase.keys())) + + + if self.only_visual: + assert self.with_visual + self.sample_ids = [(i, j) for i, j in self.sample_ids + if len(self.samples_by_phrase[self.refvg_loader.get_img_ref_data(i)['phrases'][j]]) > 1] + + # Filter by size (if min_size is set) + sizes = [self.refvg_loader.get_img_ref_data(i)['gt_boxes'][j] for i, j in self.sample_ids] + image_sizes = [self.refvg_loader.get_img_ref_data(i)['width'] * self.refvg_loader.get_img_ref_data(i)['height'] for i, j in self.sample_ids] + #self.sizes = [sum([(s[2] - s[0]) * (s[3] - s[1]) for s in size]) for size in sizes] + self.sizes = [sum([s[2] * s[3] for s in size]) / img_size for size, img_size in zip(sizes, image_sizes)] + + if min_size: + print('filter by size') + + self.sample_ids = [self.sample_ids[i] for i in range(len(self.sample_ids)) if self.sizes[i] > min_size] + + self.base_path = join(expanduser('~/datasets/PhraseCut/VGPhraseCut_v0/images/')) + + def __len__(self): + return len(self.sample_ids) + + + def load_sample(self, sample_i, j): + + img_ref_data = self.refvg_loader.get_img_ref_data(sample_i) + + polys_phrase0 = img_ref_data['gt_Polygons'][j] + phrase = img_ref_data['phrases'][j] + phrase = self.phrase_form.format(phrase) + + masks = [] + for polys in polys_phrase0: + for poly in polys: + poly = [p[::-1] for p in poly] # swap x,y + masks += [polygon2mask((img_ref_data['height'], img_ref_data['width']), poly)] + + seg = np.stack(masks).max(0) + img = np.array(Image.open(join(self.base_path, str(img_ref_data['image_id']) + '.jpg'))) + + min_shape = min(img.shape[:2]) + + if self.aug_crop: + sly, slx, exceed = find_crop(seg, (min_shape, min_shape), iterations=50, min_frac=0.05) + else: + sly, slx = slice(0, None), slice(0, None) + + seg = seg[sly, slx] + img = img[sly, slx] + + seg = seg.astype('uint8') + seg = torch.from_numpy(seg).view(1, 1, *seg.shape) + + if img.ndim == 2: + img = np.dstack([img] * 3) + + img = torch.from_numpy(img).permute(2,0,1).unsqueeze(0).float() + + seg = nnf.interpolate(seg, (self.image_size, self.image_size), mode='nearest')[0,0] + img = nnf.interpolate(img, (self.image_size, self.image_size), mode='bilinear', align_corners=True)[0] + + # img = img.permute([2,0, 1]) + img = img / 255.0 + + if self.aug_color is not None: + img = self.aug_color(img) + + img = self.normalize(img) + + + + return img, seg, phrase + + def __getitem__(self, i): + + sample_i, j = self.sample_ids[i] + + img, seg, phrase = self.load_sample(sample_i, j) + + if self.negative_prob > 0: + if torch.rand((1,)).item() < self.negative_prob: + + new_phrase = None + while new_phrase is None or new_phrase == phrase: + idx = torch.randint(0, len(self.all_phrases), (1,)).item() + new_phrase = self.all_phrases[idx] + phrase = new_phrase + seg = torch.zeros_like(seg) + + if self.with_visual: + # find a corresponding visual image + if phrase in self.samples_by_phrase and len(self.samples_by_phrase[phrase]) > 1: + idx = 
torch.randint(0, len(self.samples_by_phrase[phrase]), (1,)).item() + other_sample = self.samples_by_phrase[phrase][idx] + #print(other_sample) + img_s, seg_s, _ = self.load_sample(*other_sample) + + from datasets.utils import blend_image_segmentation + + if self.mask in {'separate', 'text_and_separate'}: + # assert img.shape[1:] == img_s.shape[1:] == seg_s.shape == seg.shape[1:] + add_phrase = [phrase] if self.mask == 'text_and_separate' else [] + vis_s = add_phrase + [img_s, seg_s, True] + else: + if self.mask.startswith('text_and_'): + mask_mode = self.mask[9:] + label_add = [phrase] + else: + mask_mode = self.mask + label_add = [] + + masked_img_s = torch.from_numpy(blend_image_segmentation(img_s, seg_s, mode=mask_mode, image_size=self.image_size)[0]) + vis_s = label_add + [masked_img_s, True] + + else: + # phrase is unique + vis_s = torch.zeros_like(img) + + if self.mask in {'separate', 'text_and_separate'}: + add_phrase = [phrase] if self.mask == 'text_and_separate' else [] + vis_s = add_phrase + [vis_s, torch.zeros(*vis_s.shape[1:], dtype=torch.uint8), False] + elif self.mask.startswith('text_and_'): + vis_s = [phrase, vis_s, False] + else: + vis_s = [vis_s, False] + else: + assert self.mask == 'text' + vis_s = [phrase] + + seg = seg.unsqueeze(0).float() + + data_x = (img,) + tuple(vis_s) + + return data_x, (seg, torch.zeros(0), i) + + +class PhraseCutPlus(PhraseCut): + + def __init__(self, split, image_size=400, aug=None, aug_color=False, aug_crop=True, min_size=0, remove_classes=None, only_visual=False, mask=None): + super().__init__(split, image_size=image_size, negative_prob=0.2, aug=aug, aug_color=aug_color, aug_crop=aug_crop, min_size=min_size, + remove_classes=remove_classes, with_visual=True, only_visual=only_visual, mask=mask) \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/datasets/utils.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/datasets/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..35d0127ac66781969b80dfe3e4f887239459ca74 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/datasets/utils.py @@ -0,0 +1,68 @@ + +import numpy as np +import torch + + +def blend_image_segmentation(img, seg, mode, image_size=224): + + + if mode in {'blur_highlight', 'blur3_highlight', 'blur3_highlight01', 'blur_highlight_random', 'crop'}: + if isinstance(img, np.ndarray): + img = torch.from_numpy(img) + + if isinstance(seg, np.ndarray): + seg = torch.from_numpy(seg) + + if mode == 'overlay': + out = img * seg + out = [out.astype('float32')] + elif mode == 'highlight': + out = img * seg[None, :, :] * 0.85 + 0.15 * img + out = [out.astype('float32')] + elif mode == 'highlight2': + img = img / 2 + out = (img+0.1) * seg[None, :, :] + 0.3 * img + out = [out.astype('float32')] + elif mode == 'blur_highlight': + from evaluation_utils import img_preprocess + out = [img_preprocess((None, [img], [seg]), blur=1, bg_fac=0.5).numpy()[0] - 0.01] + elif mode == 'blur3_highlight': + from evaluation_utils import img_preprocess + out = [img_preprocess((None, [img], [seg]), blur=3, bg_fac=0.5).numpy()[0] - 0.01] + elif mode == 'blur3_highlight01': + from evaluation_utils import img_preprocess + out = [img_preprocess((None, [img], [seg]), blur=3, bg_fac=0.1).numpy()[0] - 0.01] + elif mode == 'blur_highlight_random': + from evaluation_utils import img_preprocess + out = [img_preprocess((None, [img], [seg]), blur=0 + torch.randint(0, 3, 
(1,)).item(), bg_fac=0.1 + 0.8*torch.rand(1).item()).numpy()[0] - 0.01] + elif mode == 'crop': + from evaluation_utils import img_preprocess + out = [img_preprocess((None, [img], [seg]), blur=1, center_context=0.1, image_size=image_size)[0].numpy()] + elif mode == 'crop_blur_highlight': + from evaluation_utils import img_preprocess + out = [img_preprocess((None, [img], [seg]), blur=3, center_context=0.1, bg_fac=0.1, image_size=image_size)[0].numpy()] + elif mode == 'crop_blur_highlight352': + from evaluation_utils import img_preprocess + out = [img_preprocess((None, [img], [seg]), blur=3, center_context=0.1, bg_fac=0.1, image_size=352)[0].numpy()] + elif mode == 'shape': + out = [np.stack([seg[:, :]]*3).astype('float32')] + elif mode == 'concat': + out = [np.concatenate([img, seg[None, :, :]]).astype('float32')] + elif mode == 'image_only': + out = [img.astype('float32')] + elif mode == 'image_black': + out = [img.astype('float32')*0] + elif mode is None: + out = [img.astype('float32')] + elif mode == 'separate': + out = [img.astype('float32'), seg.astype('int64')] + elif mode == 'separate_img_black': + out = [img.astype('float32')*0, seg.astype('int64')] + elif mode == 'separate_seg_ones': + out = [img.astype('float32'), np.ones_like(seg).astype('int64')] + elif mode == 'separate_both_black': + out = [img.astype('float32')*0, seg.astype('int64')*0] + else: + raise ValueError(f'invalid mode: {mode}') + + return out \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/environment.yml b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/environment.yml new file mode 100644 index 0000000000000000000000000000000000000000..1dd8a2b1072d5f37d84dbadf7cdfd65f2ec07320 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/environment.yml @@ -0,0 +1,15 @@ +name: clipseg-environment +channels: + - conda-forge + - pytorch +dependencies: + - numpy + - scipy + - matplotlib-base + - pip + - pip: + - --find-links https://download.pytorch.org/whl/torch_stable.html + - torch==1.10.0+cpu + - torchvision==0.11.1+cpu + - opencv-python + - git+https://github.com/openai/CLIP.git diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/evaluation_utils.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/evaluation_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..8f913a98ad910db386838463908141fb9dcef442 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/evaluation_utils.py @@ -0,0 +1,292 @@ +from torch.functional import Tensor +from general_utils import load_model +from torch.utils.data import DataLoader +import torch +import numpy as np + +def denorm(img): + + np_input = False + if isinstance(img, np.ndarray): + img = torch.from_numpy(img) + np_input = True + + mean = torch.Tensor([0.485, 0.456, 0.406]) + std = torch.Tensor([0.229, 0.224, 0.225]) + + img_denorm = (img*std[:,None,None]) + mean[:,None,None] + + if np_input: + img_denorm = np.clip(img_denorm.numpy(), 0, 1) + else: + img_denorm = torch.clamp(img_denorm, 0, 1) + + return img_denorm + + +def norm(img): + mean = torch.Tensor([0.485, 0.456, 0.406]) + std = torch.Tensor([0.229, 0.224, 0.225]) + return (img - mean[:,None,None]) / std[:,None,None] + + +def fast_iou_curve(p, g): + + g = g[p.sort().indices] + p = torch.sigmoid(p.sort().values) + + scores = [] + vals = np.linspace(0, 1, 50) + + for q in vals: + + n = 
int(len(g) * q) + + valid = torch.where(p > q)[0] + if len(valid) > 0: + n = int(valid[0]) + else: + n = len(g) + + fn = g[:n].sum() + tn = n - fn + tp = g[n:].sum() + fp = len(g) - n - tp + + iou = tp / (tp + fn + fp) + + precision = tp / (tp + fp) + recall = tp / (tp + fn) + + scores += [iou] + + return vals, scores + + +def fast_rp_curve(p, g): + + g = g[p.sort().indices] + p = torch.sigmoid(p.sort().values) + + precisions, recalls = [], [] + vals = np.linspace(p.min(), p.max(), 250) + + for q in p[::100000]: + + n = int(len(g) * q) + + valid = torch.where(p > q)[0] + if len(valid) > 0: + n = int(valid[0]) + else: + n = len(g) + + fn = g[:n].sum() + tn = n - fn + tp = g[n:].sum() + fp = len(g) - n - tp + + iou = tp / (tp + fn + fp) + + precision = tp / (tp + fp) + recall = tp / (tp + fn) + + precisions += [precision] + recalls += [recall] + + return recalls, precisions + + +# Image processing + +def img_preprocess(batch, blur=0, grayscale=False, center_context=None, rect=False, rect_color=(255,0,0), rect_width=2, + brightness=1.0, bg_fac=1, colorize=False, outline=False, image_size=224): + import cv2 + + rw = rect_width + + out = [] + for img, mask in zip(batch[1], batch[2]): + + img = img.cpu() if isinstance(img, torch.Tensor) else torch.from_numpy(img) + mask = mask.cpu() if isinstance(mask, torch.Tensor) else torch.from_numpy(mask) + + img *= brightness + img_bl = img + if blur > 0: # best 5 + img_bl = torch.from_numpy(cv2.GaussianBlur(img.permute(1,2,0).numpy(), (15, 15), blur)).permute(2,0,1) + + if grayscale: + img_bl = img_bl[1][None] + + #img_inp = img_ratio*img*mask + (1-img_ratio)*img_bl + # img_inp = img_ratio*img*mask + (1-img_ratio)*img_bl * (1-mask) + img_inp = img*mask + (bg_fac) * img_bl * (1-mask) + + if rect: + _, bbox = crop_mask(img, mask, context=0.1) + img_inp[:, bbox[2]: bbox[3], max(0, bbox[0]-rw):bbox[0]+rw] = torch.tensor(rect_color)[:,None,None] + img_inp[:, bbox[2]: bbox[3], max(0, bbox[1]-rw):bbox[1]+rw] = torch.tensor(rect_color)[:,None,None] + img_inp[:, max(0, bbox[2]-1): bbox[2]+rw, bbox[0]:bbox[1]] = torch.tensor(rect_color)[:,None,None] + img_inp[:, max(0, bbox[3]-1): bbox[3]+rw, bbox[0]:bbox[1]] = torch.tensor(rect_color)[:,None,None] + + + if center_context is not None: + img_inp = object_crop(img_inp, mask, context=center_context, image_size=image_size) + + if colorize: + img_gray = denorm(img) + img_gray = cv2.cvtColor(img_gray.permute(1,2,0).numpy(), cv2.COLOR_RGB2GRAY) + img_gray = torch.stack([torch.from_numpy(img_gray)]*3) + img_inp = torch.tensor([1,0.2,0.2])[:,None,None] * img_gray * mask + bg_fac * img_gray * (1-mask) + img_inp = norm(img_inp) + + if outline: + cont = cv2.findContours(mask.byte().numpy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) + outline_img = np.zeros(mask.shape, dtype=np.uint8) + cv2.drawContours(outline_img, cont[0], -1, thickness=5, color=(255, 255, 255)) + outline_img = torch.stack([torch.from_numpy(outline_img)]*3).float() / 255. 
+ img_inp = torch.tensor([1,0,0])[:,None,None] * outline_img + denorm(img_inp) * (1- outline_img) + img_inp = norm(img_inp) + + out += [img_inp] + + return torch.stack(out) + + +def object_crop(img, mask, context=0.0, square=False, image_size=224): + img_crop, bbox = crop_mask(img, mask, context=context, square=square) + img_crop = pad_to_square(img_crop, channel_dim=0) + img_crop = torch.nn.functional.interpolate(img_crop.unsqueeze(0), (image_size, image_size)).squeeze(0) + return img_crop + + +def crop_mask(img, mask, context=0.0, square=False): + + assert img.shape[1:] == mask.shape + + bbox = [mask.max(0).values.argmax(), mask.size(0) - mask.max(0).values.flip(0).argmax()] + bbox += [mask.max(1).values.argmax(), mask.size(1) - mask.max(1).values.flip(0).argmax()] + bbox = [int(x) for x in bbox] + + width, height = (bbox[3] - bbox[2]), (bbox[1] - bbox[0]) + + # square mask + if square: + bbox[0] = int(max(0, bbox[0] - context * height)) + bbox[1] = int(min(mask.size(0), bbox[1] + context * height)) + bbox[2] = int(max(0, bbox[2] - context * width)) + bbox[3] = int(min(mask.size(1), bbox[3] + context * width)) + + width, height = (bbox[3] - bbox[2]), (bbox[1] - bbox[0]) + if height > width: + bbox[2] = int(max(0, (bbox[2] - 0.5*height))) + bbox[3] = bbox[2] + height + else: + bbox[0] = int(max(0, (bbox[0] - 0.5*width))) + bbox[1] = bbox[0] + width + else: + bbox[0] = int(max(0, bbox[0] - context * height)) + bbox[1] = int(min(mask.size(0), bbox[1] + context * height)) + bbox[2] = int(max(0, bbox[2] - context * width)) + bbox[3] = int(min(mask.size(1), bbox[3] + context * width)) + + width, height = (bbox[3] - bbox[2]), (bbox[1] - bbox[0]) + img_crop = img[:, bbox[2]: bbox[3], bbox[0]: bbox[1]] + return img_crop, bbox + + +def pad_to_square(img, channel_dim=2, fill=0): + """ + + + add padding such that a squared image is returned """ + + from torchvision.transforms.functional import pad + + if channel_dim == 2: + img = img.permute(2, 0, 1) + elif channel_dim == 0: + pass + else: + raise ValueError('invalid channel_dim') + + h, w = img.shape[1:] + pady1 = pady2 = padx1 = padx2 = 0 + + if h > w: + padx1 = (h - w) // 2 + padx2 = h - w - padx1 + elif w > h: + pady1 = (w - h) // 2 + pady2 = w - h - pady1 + + img_padded = pad(img, padding=(padx1, pady1, padx2, pady2), padding_mode='constant') + + if channel_dim == 2: + img_padded = img_padded.permute(1, 2, 0) + + return img_padded + + +# qualitative + +def split_sentence(inp, limit=9): + t_new, current_len = [], 0 + for k, t in enumerate(inp.split(' ')): + current_len += len(t) + 1 + t_new += [t+' '] + # not last + if current_len > limit and k != len(inp.split(' ')) - 1: + current_len = 0 + t_new += ['\n'] + + t_new = ''.join(t_new) + return t_new + + +from matplotlib import pyplot as plt + + +def plot(imgs, *preds, labels=None, scale=1, cmap=plt.cm.magma, aps=None, gt_labels=None, vmax=None): + + row_off = 0 if labels is None else 1 + _, ax = plt.subplots(len(imgs) + row_off, 1 + len(preds), figsize=(scale * float(1 + 2*len(preds)), scale * float(len(imgs)*2))) + [a.axis('off') for a in ax.flatten()] + + if labels is not None: + for j in range(len(labels)): + t_new = split_sentence(labels[j], limit=6) + ax[0, 1+ j].text(0.5, 0.1, t_new, ha='center', fontsize=3+ 10*scale) + + + for i in range(len(imgs)): + ax[i + row_off,0].imshow(imgs[i]) + for j in range(len(preds)): + img = preds[j][i][0].detach().cpu().numpy() + + if gt_labels is not None and labels[j] == gt_labels[i]: + print(j, labels[j], gt_labels[i]) + edgecolor = 'red' + if aps is not 
None: + ax[i + row_off, 1 + j].text(30, 70, f'AP: {aps[i]:.3f}', color='red', fontsize=8) + else: + edgecolor = 'k' + + rect = plt.Rectangle([0,0], img.shape[0], img.shape[1], facecolor="none", + edgecolor=edgecolor, linewidth=3) + ax[i + row_off,1 + j].add_patch(rect) + + if vmax is None: + this_vmax = 1 + elif vmax == 'per_prompt': + this_vmax = max([preds[j][_i][0].max() for _i in range(len(imgs))]) + elif vmax == 'per_image': + this_vmax = max([preds[_j][i][0].max() for _j in range(len(preds))]) + + ax[i + row_off,1 + j].imshow(img, vmin=0, vmax=this_vmax, cmap=cmap) + + + # ax[i,1 + j].imshow(preds[j][i][0].detach().cpu().numpy(), vmin=preds[j].min(), vmax=preds[j].max()) + plt.tight_layout() + plt.subplots_adjust(wspace=0.05, hspace=0.05) \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/example_image.jpg b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/example_image.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7e47c8223599929bc45f0d64f22ca581b3442dae --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/example_image.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bffef4c76aa9ad56fa072c3b61f1733ea60a204994c5eae2262621e8c8edd686 +size 91493 diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/experiments/ablation.yaml b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/experiments/ablation.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a6f5eba3718f59ccb3dfe125d5b2d4e620f9ca02 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/experiments/ablation.yaml @@ -0,0 +1,84 @@ +configuration: + batch_size: 64 + optimizer: torch.optim.AdamW + + lr: 0.001 + + trainer: experiment_setup.train_loop + scorer: experiment_setup.score + model: models.clipseg.CLIPDensePredT + + lr_scheduler: cosine + T_max: 20000 + eta_min: 0.0001 + + max_iterations: 20000 # <-########################################## + val_interval: null + + # dataset + dataset: datasets.phrasecut.PhraseCut # <----------------- + split_mode: pascal_test + split: train + mask: text_and_crop_blur_highlight352 + image_size: 352 + negative_prob: 0.2 + mix_text_max: 0.5 + + # general + mix: True # <----------------- + prompt: shuffle+ + norm_cond: True + mix_text_min: 0.0 + with_visual: True + + # model + version: 'ViT-B/16' + extract_layers: [3, 7, 9] + reduce_dim: 64 + depth: 3 + fix_shift: False # <-########################################## + + loss: torch.nn.functional.binary_cross_entropy_with_logits + amp: True + +test_configuration_common: + normalize: True + image_size: 352 + batch_size: 32 + sigmoid: True + split: test + label_support: True + +test_configuration: + + - + name: pc + metric: metrics.FixedIntervalMetrics + test_dataset: phrasecut + mask: text + + - + name: pc-vis + metric: metrics.FixedIntervalMetrics + test_dataset: phrasecut + mask: crop_blur_highlight352 + with_visual: True + visual_only: True + + +columns: [name, +pc_fgiou_best, pc_miou_best, pc_fgiou_0.5, +pc-vis_fgiou_best, pc-vis_miou_best, pc-vis_fgiou_0.5, +duration] + + +individual_configurations: + +- {name: rd64-uni} +- {name: rd64-no-pretrain, not_pretrained: True, lr: 0.0003} +- {name: rd64-no-negatives, negative_prob: 0.0} +- {name: rd64-neg0.5, negative_prob: 0.5} +- {name: rd64-no-visual, with_visual: False, mix: False} +- {name: rd16-uni, 
reduce_dim: 16} +- {name: rd64-layer3, extract_layers: [3], depth: 1} +- {name: rd64-blur-highlight, mask: text_and_blur_highlight, test_configuration: {mask: blur_highlight}} \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/experiments/coco.yaml b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/experiments/coco.yaml new file mode 100644 index 0000000000000000000000000000000000000000..561264340f097357f723d00a2ab4b786366f2da5 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/experiments/coco.yaml @@ -0,0 +1,101 @@ +configuration: + batch_size: 64 + optimizer: torch.optim.AdamW + + lr: 0.001 + + trainer: experiment_setup.train_loop + scorer: experiment_setup.score + model: models.clipseg.CLIPDensePredT + + lr_scheduler: cosine + T_max: 20000 + eta_min: 0.0001 + + max_iterations: 20000 + val_interval: null + + # dataset + dataset: datasets.coco_wrapper.COCOWrapper + # split_mode: pascal_test + split: train + mask: text_and_blur3_highlight01 + image_size: 352 + normalize: True + pre_crop_image_size: [sample, 1, 1.5] + aug: 1new + + # general + mix: True + prompt: shuffle+ + norm_cond: True + mix_text_min: 0.0 + + # model + out: 1 + extract_layers: [3, 7, 9] + reduce_dim: 64 + depth: 3 + fix_shift: False + + loss: torch.nn.functional.binary_cross_entropy_with_logits + amp: True + +test_configuration_common: + normalize: True + image_size: 352 + # max_iterations: 10 + batch_size: 8 + sigmoid: True + test_dataset: coco + metric: metrics.FixedIntervalMetrics + +test_configuration: + + - + name: coco_t + mask: text + + - + name: coco_h + mask: blur3_highlight01 + + - + name: coco_h2 + mask: crop_blur_highlight352 + + +columns: [i, name, +coco_t_fgiou_best, coco_t_miou_best, coco_t_fgiou_0.5, +coco_h_fgiou_best, coco_h_miou_best, coco_h_fgiou_0.5, +coco_h2_fgiou_best, coco_h2_miou_best, coco_h2_fgiou_0.5, coco_h2_fgiou_best_t, +train_loss, duration, date +] + +individual_configurations: + + +- {name: rd64-7K-vit16-cbh-coco-0, version: 'ViT-B/16', fold: 0, reduce_dim: 64, mask: text_and_crop_blur_highlight352, T_max: 7000, max_iterations: 7000} +- {name: rd64-7K-vit16-cbh-coco-1, version: 'ViT-B/16', fold: 1, reduce_dim: 64, mask: text_and_crop_blur_highlight352, T_max: 7000, max_iterations: 7000} +- {name: rd64-7K-vit16-cbh-coco-2, version: 'ViT-B/16', fold: 2, reduce_dim: 64, mask: text_and_crop_blur_highlight352, T_max: 7000, max_iterations: 7000} +- {name: rd64-7K-vit16-cbh-coco-3, version: 'ViT-B/16', fold: 3, reduce_dim: 64, mask: text_and_crop_blur_highlight352, T_max: 7000, max_iterations: 7000} + + +- {name: rd64-7K-vit16-cbh-neg0.2-coco-0, version: 'ViT-B/16', negative_prob: 0.2, fold: 0, reduce_dim: 64, mask: text_and_crop_blur_highlight352, T_max: 7000, max_iterations: 7000} +- {name: rd64-7K-vit16-cbh-neg0.2-coco-1, version: 'ViT-B/16', negative_prob: 0.2, fold: 1, reduce_dim: 64, mask: text_and_crop_blur_highlight352, T_max: 7000, max_iterations: 7000} +- {name: rd64-7K-vit16-cbh-neg0.2-coco-2, version: 'ViT-B/16', negative_prob: 0.2, fold: 2, reduce_dim: 64, mask: text_and_crop_blur_highlight352, T_max: 7000, max_iterations: 7000} +- {name: rd64-7K-vit16-cbh-neg0.2-coco-3, version: 'ViT-B/16', negative_prob: 0.2, fold: 3, reduce_dim: 64, mask: text_and_crop_blur_highlight352, T_max: 7000, max_iterations: 7000} + + +# ViT +- {name: vit64-7K-vit16-cbh-coco-0, version: 'ViT-B/16', model: models.vitseg.VITDensePredT, fold: 0, reduce_dim: 64, mask: 
text_and_crop_blur_highlight352, T_max: 7000, max_iterations: 7000, lr: 0.0001} +- {name: vit64-7K-vit16-cbh-coco-1, version: 'ViT-B/16', model: models.vitseg.VITDensePredT, fold: 1, reduce_dim: 64, mask: text_and_crop_blur_highlight352, T_max: 7000, max_iterations: 7000, lr: 0.0001} +- {name: vit64-7K-vit16-cbh-coco-2, version: 'ViT-B/16', model: models.vitseg.VITDensePredT, fold: 2, reduce_dim: 64, mask: text_and_crop_blur_highlight352, T_max: 7000, max_iterations: 7000, lr: 0.0001} +- {name: vit64-7K-vit16-cbh-coco-3, version: 'ViT-B/16', model: models.vitseg.VITDensePredT, fold: 3, reduce_dim: 64, mask: text_and_crop_blur_highlight352, T_max: 7000, max_iterations: 7000, lr: 0.0001} + + +# BASELINE +- {name: bl64-7K-vit16-cbh-neg0.2-coco-0, model: models.clipseg.CLIPDenseBaseline, reduce2_dim: 64, version: 'ViT-B/16', negative_prob: 0.2, fold: 0, reduce_dim: 64, mask: text_and_crop_blur_highlight352, T_max: 7000, max_iterations: 7000} +- {name: bl64-7K-vit16-cbh-neg0.2-coco-1, model: models.clipseg.CLIPDenseBaseline, reduce2_dim: 64, version: 'ViT-B/16', negative_prob: 0.2, fold: 1, reduce_dim: 64, mask: text_and_crop_blur_highlight352, T_max: 7000, max_iterations: 7000} +- {name: bl64-7K-vit16-cbh-neg0.2-coco-2, model: models.clipseg.CLIPDenseBaseline, reduce2_dim: 64, version: 'ViT-B/16', negative_prob: 0.2, fold: 2, reduce_dim: 64, mask: text_and_crop_blur_highlight352, T_max: 7000, max_iterations: 7000} +- {name: bl64-7K-vit16-cbh-neg0.2-coco-3, model: models.clipseg.CLIPDenseBaseline, reduce2_dim: 64, version: 'ViT-B/16', negative_prob: 0.2, fold: 3, reduce_dim: 64, mask: text_and_crop_blur_highlight352, T_max: 7000, max_iterations: 7000} \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/experiments/pascal_1shot.yaml b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/experiments/pascal_1shot.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b1e260a68fd3a5562019518d5926d6a45f450eba --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/experiments/pascal_1shot.yaml @@ -0,0 +1,101 @@ +configuration: + batch_size: 64 + optimizer: torch.optim.AdamW + + lr: 0.001 + + trainer: experiment_setup.train_loop + scorer: experiment_setup.score + model: models.clipseg.CLIPDensePredT + + lr_scheduler: cosine + T_max: 20000 + eta_min: 0.0001 + + max_iterations: 20000 # <-########################################## + val_interval: null + + # dataset + dataset: datasets.phrasecut.PhraseCut + split_mode: pascal_test + mode: train + mask: text_and_crop_blur_highlight352 + image_size: 352 + normalize: True + pre_crop_image_size: [sample, 1, 1.5] + aug: 1new + with_visual: True + split: train + + # general + mix: True + prompt: shuffle+ + norm_cond: True + mix_text_min: 0.0 + + # model + out: 1 + version: 'ViT-B/16' + extract_layers: [3, 7, 9] + reduce_dim: 64 + depth: 3 + + loss: torch.nn.functional.binary_cross_entropy_with_logits + amp: True + +test_configuration_common: + normalize: True + image_size: 352 + metric: metrics.FixedIntervalMetrics + batch_size: 1 + test_dataset: pascal + sigmoid: True + # max_iterations: 250 + +test_configuration: + + - + name: pas_t + mask: text + + - + name: pas_h + mask: blur3_highlight01 + + - + name: pas_h2 + mask: crop_blur_highlight352 + + +columns: [name, +pas_t_fgiou_best, pas_t_miou_best, pas_t_fgiou_ct, +pas_h_fgiou_best, pas_h_miou_best, pas_h_fgiou_ct, +pas_h2_fgiou_best, pas_h2_miou_best, pas_h2_fgiou_ct, 
pas_h2_fgiou_best_t, +train_loss, duration, date +] + +individual_configurations: + +- {name: rd64-uni-phrasepas5i-0, remove_classes: [pas5i, 0], negative_prob: 0.2, mix_text_max: 0.5, test_configuration: {splits: [0], custom_threshold: 0.24}} +- {name: rd64-uni-phrasepas5i-1, remove_classes: [pas5i, 1], negative_prob: 0.2, mix_text_max: 0.5, test_configuration: {splits: [1], custom_threshold: 0.24}} +- {name: rd64-uni-phrasepas5i-2, remove_classes: [pas5i, 2], negative_prob: 0.2, mix_text_max: 0.5, test_configuration: {splits: [2], custom_threshold: 0.24}} +- {name: rd64-uni-phrasepas5i-3, remove_classes: [pas5i, 3], negative_prob: 0.2, mix_text_max: 0.5, test_configuration: {splits: [3], custom_threshold: 0.24}} + + +- {name: rd64-phrasepas5i-0, remove_classes: [pas5i, 0], negative_prob: 0.0, test_configuration: {splits: [0], custom_threshold: 0.28}} +- {name: rd64-phrasepas5i-1, remove_classes: [pas5i, 1], negative_prob: 0.0, test_configuration: {splits: [1], custom_threshold: 0.28}} +- {name: rd64-phrasepas5i-2, remove_classes: [pas5i, 2], negative_prob: 0.0, test_configuration: {splits: [2], custom_threshold: 0.28}} +- {name: rd64-phrasepas5i-3, remove_classes: [pas5i, 3], negative_prob: 0.0, test_configuration: {splits: [3], custom_threshold: 0.28}} + + +# baseline +- {name: bl64-phrasepas5i-0, model: models.clipseg.CLIPDenseBaseline, remove_classes: [pas5i, 0], reduce2_dim: 64, negative_prob: 0.0, test_configuration: {splits: [0], custom_threshold: 0.24}} +- {name: bl64-phrasepas5i-1, model: models.clipseg.CLIPDenseBaseline, remove_classes: [pas5i, 1], reduce2_dim: 64, negative_prob: 0.0, test_configuration: {splits: [1], custom_threshold: 0.24}} +- {name: bl64-phrasepas5i-2, model: models.clipseg.CLIPDenseBaseline, remove_classes: [pas5i, 2], reduce2_dim: 64, negative_prob: 0.0, test_configuration: {splits: [2], custom_threshold: 0.24}} +- {name: bl64-phrasepas5i-3, model: models.clipseg.CLIPDenseBaseline, remove_classes: [pas5i, 3], reduce2_dim: 64, negative_prob: 0.0, test_configuration: {splits: [3], custom_threshold: 0.24}} + +# ViT +- {name: vit64-uni-phrasepas5i-0, remove_classes: [pas5i, 0], model: models.vitseg.VITDensePredT, negative_prob: 0.2, mix_text_max: 0.5, lr: 0.0001, test_configuration: {splits: [0], custom_threshold: 0.02}} +- {name: vit64-uni-phrasepas5i-1, remove_classes: [pas5i, 1], model: models.vitseg.VITDensePredT, negative_prob: 0.2, mix_text_max: 0.5, lr: 0.0001, test_configuration: {splits: [1], custom_threshold: 0.02}} +- {name: vit64-uni-phrasepas5i-2, remove_classes: [pas5i, 2], model: models.vitseg.VITDensePredT, negative_prob: 0.2, mix_text_max: 0.5, lr: 0.0001, test_configuration: {splits: [2], custom_threshold: 0.02}} +- {name: vit64-uni-phrasepas5i-3, remove_classes: [pas5i, 3], model: models.vitseg.VITDensePredT, negative_prob: 0.2, mix_text_max: 0.5, lr: 0.0001, test_configuration: {splits: [3], custom_threshold: 0.02}} diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/experiments/phrasecut.yaml b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/experiments/phrasecut.yaml new file mode 100644 index 0000000000000000000000000000000000000000..310d2e99a0454859b41446e0fe9deca90f2ae0a0 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/experiments/phrasecut.yaml @@ -0,0 +1,80 @@ +configuration: + batch_size: 64 + optimizer: torch.optim.AdamW + + lr: 0.001 + + trainer: experiment_setup.train_loop + scorer: experiment_setup.score + model: 
models.clipseg.CLIPDensePredT + + lr_scheduler: cosine + T_max: 20000 + eta_min: 0.0001 + + max_iterations: 20000 + val_interval: null + + # dataset + dataset: datasets.phrasecut.PhraseCut # <----------------- + split_mode: pascal_test + split: train + mask: text_and_crop_blur_highlight352 + image_size: 352 + normalize: True + pre_crop_image_size: [sample, 1, 1.5] + aug: 1new + + # general + mix: False # <----------------- + prompt: shuffle+ + norm_cond: True + mix_text_min: 0.0 + + # model + out: 1 + extract_layers: [3, 7, 9] + reduce_dim: 64 + depth: 3 + fix_shift: False + + loss: torch.nn.functional.binary_cross_entropy_with_logits + amp: True + +test_configuration_common: + normalize: True + image_size: 352 + batch_size: 32 + # max_iterations: 5 + # max_iterations: 150 + +test_configuration: + + - + name: pc # old: phrasecut + metric: metrics.FixedIntervalMetrics + test_dataset: phrasecut + split: test + mask: text + label_support: True + sigmoid: True + + +columns: [i, name, pc_miou_0.3, pc_fgiou_0.3, pc_fgiou_0.5, pc_ap, duration, date] + + +individual_configurations: + +# important ones + + +- {name: rd64-uni, version: 'ViT-B/16', reduce_dim: 64, with_visual: True, negative_prob: 0.2, mix: True, mix_text_max: 0.5} + +# this was accedentally trained using old mask +- {name: rd128-vit16-phrasecut, version: 'ViT-B/16', reduce_dim: 128, mask: text_and_blur3_highlight01} +- {name: rd64-uni-novis, version: 'ViT-B/16', reduce_dim: 64, with_visual: False, negative_prob: 0.2, mix: False} +# this was accedentally trained using old mask +- {name: baseline3-vit16-phrasecut, model: models.clipseg.CLIPDenseBaseline, version: 'ViT-B/16', reduce_dim: 64, reduce2_dim: 64, mask: text_and_blur3_highlight01} + +- {name: vit64-uni, version: 'ViT-B/16', model: models.vitseg.VITDensePredT, reduce_dim: 64, with_visual: True, only_visual: True, negative_prob: 0.2, mask: crop_blur_highlight352, lr: 0.0003} +- {name: vit64-uni-novis, version: 'ViT-B/16', model: models.vitseg.VITDensePredT, with_visual: False, reduce_dim: 64, lr: 0.0001} \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/general_utils.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/general_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..708d32e701a78f3ce848060baef561c8f11b1b2e --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/general_utils.py @@ -0,0 +1,272 @@ +import json +import inspect +import torch +import os +import sys +import yaml +from shutil import copy, copytree +from os.path import join, dirname, realpath, expanduser, isfile, isdir, basename + + +class Logger(object): + + def __getattr__(self, k): + return print + +log = Logger() + +def training_config_from_cli_args(): + experiment_name = sys.argv[1] + experiment_id = int(sys.argv[2]) + + yaml_config = yaml.load(open(f'experiments/{experiment_name}'), Loader=yaml.SafeLoader) + + config = yaml_config['configuration'] + config = {**config, **yaml_config['individual_configurations'][experiment_id]} + config = AttributeDict(config) + return config + + +def score_config_from_cli_args(): + experiment_name = sys.argv[1] + experiment_id = int(sys.argv[2]) + + + yaml_config = yaml.load(open(f'experiments/{experiment_name}'), Loader=yaml.SafeLoader) + + config = yaml_config['test_configuration_common'] + + if type(yaml_config['test_configuration']) == list: + test_id = int(sys.argv[3]) + config = {**config, 
**yaml_config['test_configuration'][test_id]} + else: + config = {**config, **yaml_config['test_configuration']} + + if 'test_configuration' in yaml_config['individual_configurations'][experiment_id]: + config = {**config, **yaml_config['individual_configurations'][experiment_id]['test_configuration']} + + train_checkpoint_id = yaml_config['individual_configurations'][experiment_id]['name'] + + config = AttributeDict(config) + return config, train_checkpoint_id + + +def get_from_repository(local_name, repo_files, integrity_check=None, repo_dir='~/dataset_repository', + local_dir='~/datasets'): + """ copies files from repository to local folder. + + repo_files: list of filenames or list of tuples [filename, target path] + + e.g. get_from_repository('MyDataset', [['data/dataset1.tar', 'other/path/ds03.tar']) + will create a folder 'MyDataset' in local_dir, and extract the content of + '/data/dataset1.tar' to /MyDataset/other/path. + """ + + local_dir = realpath(join(expanduser(local_dir), local_name)) + + dataset_exists = True + + # check if folder is available + if not isdir(local_dir): + dataset_exists = False + + if integrity_check is not None: + try: + integrity_ok = integrity_check(local_dir) + except BaseException: + integrity_ok = False + + if integrity_ok: + log.hint('Passed custom integrity check') + else: + log.hint('Custom integrity check failed') + + dataset_exists = dataset_exists and integrity_ok + + if not dataset_exists: + + repo_dir = realpath(expanduser(repo_dir)) + + for i, filename in enumerate(repo_files): + + if type(filename) == str: + origin, target = filename, filename + archive_target = join(local_dir, basename(origin)) + extract_target = join(local_dir) + else: + origin, target = filename + archive_target = join(local_dir, dirname(target), basename(origin)) + extract_target = join(local_dir, dirname(target)) + + archive_origin = join(repo_dir, origin) + + log.hint(f'copy: {archive_origin} to {archive_target}') + + # make sure the path exists + os.makedirs(dirname(archive_target), exist_ok=True) + + if os.path.isfile(archive_target): + # only copy if size differs + if os.path.getsize(archive_target) != os.path.getsize(archive_origin): + log.hint(f'file exists but filesize differs: target {os.path.getsize(archive_target)} vs. 
origin {os.path.getsize(archive_origin)}') + copy(archive_origin, archive_target) + else: + copy(archive_origin, archive_target) + + extract_archive(archive_target, extract_target, noarchive_ok=True) + + # concurrent processes might have deleted the file + if os.path.isfile(archive_target): + os.remove(archive_target) + + +def extract_archive(filename, target_folder=None, noarchive_ok=False): + from subprocess import run, PIPE + + if filename.endswith('.tgz') or filename.endswith('.tar'): + command = f'tar -xf {filename}' + command += f' -C {target_folder}' if target_folder is not None else '' + elif filename.endswith('.tar.gz'): + command = f'tar -xzf {filename}' + command += f' -C {target_folder}' if target_folder is not None else '' + elif filename.endswith('zip'): + command = f'unzip {filename}' + command += f' -d {target_folder}' if target_folder is not None else '' + else: + if noarchive_ok: + return + else: + raise ValueError(f'unsuppored file ending of {filename}') + + log.hint(command) + result = run(command.split(), stdout=PIPE, stderr=PIPE) + if result.returncode != 0: + print(result.stdout, result.stderr) + + +class AttributeDict(dict): + """ + An extended dictionary that allows access to elements as atttributes and counts + these accesses. This way, we know if some attributes were never used. + """ + + def __init__(self, *args, **kwargs): + from collections import Counter + super().__init__(*args, **kwargs) + self.__dict__['counter'] = Counter() + + def __getitem__(self, k): + self.__dict__['counter'][k] += 1 + return super().__getitem__(k) + + def __getattr__(self, k): + self.__dict__['counter'][k] += 1 + return super().get(k) + + def __setattr__(self, k, v): + return super().__setitem__(k, v) + + def __delattr__(self, k, v): + return super().__delitem__(k, v) + + def unused_keys(self, exceptions=()): + return [k for k in super().keys() if self.__dict__['counter'][k] == 0 and k not in exceptions] + + def assume_no_unused_keys(self, exceptions=()): + if len(self.unused_keys(exceptions=exceptions)) > 0: + log.warning('Unused keys:', self.unused_keys(exceptions=exceptions)) + + +def get_attribute(name): + import importlib + + if name is None: + raise ValueError('The provided attribute is None') + + name_split = name.split('.') + mod = importlib.import_module('.'.join(name_split[:-1])) + return getattr(mod, name_split[-1]) + + + +def filter_args(input_args, default_args): + + updated_args = {k: input_args[k] if k in input_args else v for k, v in default_args.items()} + used_args = {k: v for k, v in input_args.items() if k in default_args} + unused_args = {k: v for k, v in input_args.items() if k not in default_args} + + return AttributeDict(updated_args), AttributeDict(used_args), AttributeDict(unused_args) + + +def load_model(checkpoint_id, weights_file=None, strict=True, model_args='from_config', with_config=False): + + config = json.load(open(join('logs', checkpoint_id, 'config.json'))) + + if model_args != 'from_config' and type(model_args) != dict: + raise ValueError('model_args must either be "from_config" or a dictionary of values') + + model_cls = get_attribute(config['model']) + + # load model + if model_args == 'from_config': + _, model_args, _ = filter_args(config, inspect.signature(model_cls).parameters) + + model = model_cls(**model_args) + + if weights_file is None: + weights_file = realpath(join('logs', checkpoint_id, 'weights.pth')) + else: + weights_file = realpath(join('logs', checkpoint_id, weights_file)) + + if isfile(weights_file): + weights = 
torch.load(weights_file) + for _, w in weights.items(): + assert not torch.any(torch.isnan(w)), 'weights contain NaNs' + model.load_state_dict(weights, strict=strict) + else: + raise FileNotFoundError(f'model checkpoint {weights_file} was not found') + + if with_config: + return model, config + + return model + + +class TrainingLogger(object): + + def __init__(self, model, log_dir, config=None, *args): + super().__init__() + self.model = model + self.base_path = join(f'logs/{log_dir}') if log_dir is not None else None + + os.makedirs('logs/', exist_ok=True) + os.makedirs(self.base_path, exist_ok=True) + + if config is not None: + json.dump(config, open(join(self.base_path, 'config.json'), 'w')) + + def iter(self, i, **kwargs): + if i % 100 == 0 and 'loss' in kwargs: + loss = kwargs['loss'] + print(f'iteration {i}: loss {loss:.4f}') + + def save_weights(self, only_trainable=False, weight_file='weights.pth'): + if self.model is None: + raise AttributeError('You need to provide a model reference when initializing TrainingTracker to save weights.') + + weights_path = join(self.base_path, weight_file) + + weight_dict = self.model.state_dict() + + if only_trainable: + weight_dict = {n: weight_dict[n] for n, p in self.model.named_parameters() if p.requires_grad} + + torch.save(weight_dict, weights_path) + log.info(f'Saved weights to {weights_path}') + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """ automatically stop processes if used in a context manager """ + pass \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/metrics.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..35d887b61bfa583a8852c80ff164919be7b45f4e --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/metrics.py @@ -0,0 +1,271 @@ +from torch.functional import Tensor +from general_utils import log +from collections import defaultdict +import numpy as np + +import torch +from torch.nn import functional as nnf + + +class BaseMetric(object): + + def __init__(self, metric_names, pred_range=None, gt_index=0, pred_index=0, eval_intermediate=True, + eval_validation=True): + self._names = tuple(metric_names) + self._eval_intermediate = eval_intermediate + self._eval_validation = eval_validation + + self._pred_range = pred_range + self._pred_index = pred_index + self._gt_index = gt_index + + self.predictions = [] + self.ground_truths = [] + + def eval_intermediate(self): + return self._eval_intermediate + + def eval_validation(self): + return self._eval_validation + + def names(self): + return self._names + + def add(self, predictions, ground_truth): + raise NotImplementedError + + def value(self): + raise NotImplementedError + + def scores(self): + # similar to value but returns dict + value = self.value() + if type(value) == dict: + return value + else: + assert type(value) in {list, tuple} + return list(zip(self.names(), self.value())) + + def _get_pred_gt(self, predictions, ground_truth): + pred = predictions[self._pred_index] + gt = ground_truth[self._gt_index] + + if self._pred_range is not None: + pred = pred[:, self._pred_range[0]: self._pred_range[1]] + + return pred, gt + + +class FixedIntervalMetrics(BaseMetric): + + def __init__(self, sigmoid=False, ignore_mask=False, resize_to=None, + resize_pred=None, n_values=51, custom_threshold=None): + + + super().__init__(('ap', 'best_fgiou', 
'best_miou', 'fgiou0.5', 'fgiou0.1', 'mean_iou_0p5', 'mean_iou_0p1', 'best_biniou', 'biniou_0.5', 'fgiou_thresh')) + self.intersections = [] + self.unions = [] + # self.threshold = threshold + self.sigmoid = sigmoid + self.resize_to = resize_to + self.resize_pred = resize_pred # resize prediction to match ground truth + self.class_count = defaultdict(lambda: 0) + self.per_class = defaultdict(lambda : [0,0]) + self.ignore_mask = ignore_mask + self.custom_threshold = custom_threshold + + self.scores_ap = [] + self.scores_iou = [] + self.gts, self.preds = [], [] + self.classes = [] + + # [1:-1] ignores 0 and 1 + self.threshold_values = np.linspace(0, 1, n_values)[1:-1] + + self.metrics = dict(tp=[], fp=[], fn=[], tn=[]) + + def add(self, pred, gt): + + pred_batch = pred[0].cpu() + + if self.sigmoid: + pred_batch = torch.sigmoid(pred_batch) + + gt_batch = gt[0].cpu() + mask_batch = gt[1] if len(gt) > 1 and not self.ignore_mask and gt[1].numel() > 0 else ([None] * len(pred_batch)) + cls_batch = gt[2] if len(gt) > 2 else [None] * len(pred_batch) + + if self.resize_to is not None: + gt_batch = nnf.interpolate(gt_batch, self.resize_to, mode='nearest') + pred_batch = nnf.interpolate(pred_batch, self.resize_to, mode='bilinear', align_corners=False) + + if isinstance(cls_batch, torch.Tensor): + cls_batch = cls_batch.cpu().numpy().tolist() + + assert len(gt_batch) == len(pred_batch) == len(cls_batch), f'{len(gt_batch)} {len(pred_batch)} {len(cls_batch)}' + + for predictions, ground_truth, mask, cls in zip(pred_batch, gt_batch, mask_batch, cls_batch): + + if self.resize_pred: + predictions = nnf.interpolate(predictions.unsqueeze(0).float(), size=ground_truth.size()[-2:], mode='bilinear', align_corners=True) + + p = predictions.flatten() + g = ground_truth.flatten() + + assert len(p) == len(g) + + if mask is not None: + m = mask.flatten().bool() + p = p[m] + g = g[m] + + p_sorted = p.sort() + p = p_sorted.values + g = g[p_sorted.indices] + + tps, fps, fns, tns = [], [], [], [] + for thresh in self.threshold_values: + + valid = torch.where(p > thresh)[0] + if len(valid) > 0: + n = int(valid[0]) + else: + n = len(g) + + fn = int(g[:n].sum()) + tp = int(g[n:].sum()) + fns += [fn] + tns += [n - fn] + tps += [tp] + fps += [len(g) - n - tp] + + self.metrics['tp'] += [tps] + self.metrics['fp'] += [fps] + self.metrics['fn'] += [fns] + self.metrics['tn'] += [tns] + + self.classes += [cls.item() if isinstance(cls, torch.Tensor) else cls] + + def value(self): + + import time + t_start = time.time() + + if set(self.classes) == set([None]): + all_classes = None + log.warning('classes were not provided, cannot compute mIoU') + else: + all_classes = set(int(c) for c in self.classes) + # log.info(f'compute metrics for {len(all_classes)} classes') + + summed = {k: [sum([self.metrics[k][i][j] + for i in range(len(self.metrics[k]))]) + for j in range(len(self.threshold_values))] + for k in self.metrics.keys()} + + if all_classes is not None: + + assert len(self.classes) == len(self.metrics['tp']) == len(self.metrics['fn']) + # group by class + metrics_by_class = {c: {k: [] for k in self.metrics.keys()} for c in all_classes} + for i in range(len(self.metrics['tp'])): + for k in self.metrics.keys(): + metrics_by_class[self.classes[i]][k] += [self.metrics[k][i]] + + # sum over all instances within the classes + summed_by_cls = {k: {c: np.array(metrics_by_class[c][k]).sum(0).tolist() for c in all_classes} for k in self.metrics.keys()} + + + # Compute average precision + + assert (np.array(summed['fp']) + 
np.array(summed['tp']) ).sum(), 'no predictions is made' + + # only consider values where a prediction is made + precisions = [summed['tp'][j] / (1 + summed['tp'][j] + summed['fp'][j]) for j in range(len(self.threshold_values)) + if summed['tp'][j] + summed['fp'][j] > 0] + recalls = [summed['tp'][j] / (1 + summed['tp'][j] + summed['fn'][j]) for j in range(len(self.threshold_values)) + if summed['tp'][j] + summed['fp'][j] > 0] + + # remove duplicate recall-precision-pairs (and sort by recall value) + recalls, precisions = zip(*sorted(list(set(zip(recalls, precisions))), key=lambda x: x[0])) + + from scipy.integrate import simps + ap = simps(precisions, recalls) + + # Compute best IoU + fgiou_scores = [summed['tp'][j] / (1 + summed['tp'][j] + summed['fp'][j] + summed['fn'][j]) for j in range(len(self.threshold_values))] + + biniou_scores = [ + 0.5*(summed['tp'][j] / (1 + summed['tp'][j] + summed['fp'][j] + summed['fn'][j])) + + 0.5*(summed['tn'][j] / (1 + summed['tn'][j] + summed['fn'][j] + summed['fp'][j])) + for j in range(len(self.threshold_values)) + ] + + index_0p5 = self.threshold_values.tolist().index(0.5) + index_0p1 = self.threshold_values.tolist().index(0.1) + index_0p2 = self.threshold_values.tolist().index(0.2) + index_0p3 = self.threshold_values.tolist().index(0.3) + + if self.custom_threshold is not None: + index_ct = self.threshold_values.tolist().index(self.custom_threshold) + + if all_classes is not None: + # mean IoU + mean_ious = [np.mean([summed_by_cls['tp'][c][j] / (1 + summed_by_cls['tp'][c][j] + summed_by_cls['fp'][c][j] + summed_by_cls['fn'][c][j]) + for c in all_classes]) + for j in range(len(self.threshold_values))] + + mean_iou_dict = { + 'miou_best': max(mean_ious) if all_classes is not None else None, + 'miou_0.5': mean_ious[index_0p5] if all_classes is not None else None, + 'miou_0.1': mean_ious[index_0p1] if all_classes is not None else None, + 'miou_0.2': mean_ious[index_0p2] if all_classes is not None else None, + 'miou_0.3': mean_ious[index_0p3] if all_classes is not None else None, + 'miou_best_t': self.threshold_values[np.argmax(mean_ious)], + 'mean_iou_ct': mean_ious[index_ct] if all_classes is not None and self.custom_threshold is not None else None, + 'mean_iou_scores': mean_ious, + } + + print(f'metric computation on {(len(all_classes) if all_classes is not None else "no")} classes took {time.time() - t_start:.1f}s') + + return { + 'ap': ap, + + # fgiou + 'fgiou_best': max(fgiou_scores), + 'fgiou_0.5': fgiou_scores[index_0p5], + 'fgiou_0.1': fgiou_scores[index_0p1], + 'fgiou_0.2': fgiou_scores[index_0p2], + 'fgiou_0.3': fgiou_scores[index_0p3], + 'fgiou_best_t': self.threshold_values[np.argmax(fgiou_scores)], + + # mean iou + + + # biniou + 'biniou_best': max(biniou_scores), + 'biniou_0.5': biniou_scores[index_0p5], + 'biniou_0.1': biniou_scores[index_0p1], + 'biniou_0.2': biniou_scores[index_0p2], + 'biniou_0.3': biniou_scores[index_0p3], + 'biniou_best_t': self.threshold_values[np.argmax(biniou_scores)], + + # custom threshold + 'fgiou_ct': fgiou_scores[index_ct] if self.custom_threshold is not None else None, + 'biniou_ct': biniou_scores[index_ct] if self.custom_threshold is not None else None, + 'ct': self.custom_threshold, + + # statistics + 'fgiou_scores': fgiou_scores, + 'biniou_scores': biniou_scores, + 'precision_recall_curve': sorted(list(set(zip(recalls, precisions)))), + 'summed_statistics': summed, + 'summed_by_cls_statistics': summed_by_cls, + + **mean_iou_dict + } + + # ('ap', 'best_fgiou', 'best_miou', 'fgiou0.5', 'fgiou0.1', 
'mean_iou_0p5', 'mean_iou_0p1', 'best_biniou', 'biniou_0.5', 'fgiou_thresh' + + # return ap, best_fgiou, best_mean_iou, iou_0p5, iou_0p1, mean_iou_0p5, mean_iou_0p1, best_biniou, biniou0p5, best_fgiou_thresh, {'summed': summed, 'summed_by_cls': summed_by_cls} + diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/models/clipseg.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/models/clipseg.py new file mode 100644 index 0000000000000000000000000000000000000000..a4640b34bbd1ca68a32114471d5585734c4af2fc --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/models/clipseg.py @@ -0,0 +1,552 @@ +import math +from os.path import basename, dirname, join, isfile +import torch +from torch import nn +from torch.nn import functional as nnf +from torch.nn.modules.activation import ReLU + + +def precompute_clip_vectors(): + + from trails.initialization import init_dataset + lvis = init_dataset('LVIS_OneShot3', split='train', mask='text_label', image_size=224, aug=1, normalize=True, + reduce_factor=None, add_bar=False, negative_prob=0.5) + + all_names = list(lvis.category_names.values()) + + import clip + from models.clip_prompts import imagenet_templates + clip_model = clip.load("ViT-B/32", device='cuda', jit=False)[0] + prompt_vectors = {} + for name in all_names[:100]: + with torch.no_grad(): + conditionals = [t.format(name).replace('_', ' ') for t in imagenet_templates] + text_tokens = clip.tokenize(conditionals).cuda() + cond = clip_model.encode_text(text_tokens).cpu() + + for cond, vec in zip(conditionals, cond): + prompt_vectors[cond] = vec.cpu() + + import pickle + + pickle.dump(prompt_vectors, open('precomputed_prompt_vectors.pickle', 'wb')) + + +def get_prompt_list(prompt): + if prompt == 'plain': + return ['{}'] + elif prompt == 'fixed': + return ['a photo of a {}.'] + elif prompt == 'shuffle': + return ['a photo of a {}.', 'a photograph of a {}.', 'an image of a {}.', '{}.'] + elif prompt == 'shuffle+': + return ['a photo of a {}.', 'a photograph of a {}.', 'an image of a {}.', '{}.', + 'a cropped photo of a {}.', 'a good photo of a {}.', 'a photo of one {}.', + 'a bad photo of a {}.', 'a photo of the {}.'] + elif prompt == 'shuffle_clip': + from models.clip_prompts import imagenet_templates + return imagenet_templates + else: + raise ValueError('Invalid value for prompt') + + +def forward_multihead_attention(x, b, with_aff=False, attn_mask=None): + """ + Simplified version of multihead attention (taken from torch source code but without tons of if clauses). + The mlp and layer norm come from CLIP. + x: input. + b: multihead attention module. 
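+    with_aff: if True, additionally return the per-head attention weight matrix.
+    attn_mask: optional tuple (mask_type, mask); mask_type is 'cls_token' (mask only the
+    similarities against the readout token) or 'all' (mask the similarities between all image tokens).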
+ """ + + x_ = b.ln_1(x) + q, k, v = nnf.linear(x_, b.attn.in_proj_weight, b.attn.in_proj_bias).chunk(3, dim=-1) + tgt_len, bsz, embed_dim = q.size() + + head_dim = embed_dim // b.attn.num_heads + scaling = float(head_dim) ** -0.5 + + q = q.contiguous().view(tgt_len, bsz * b.attn.num_heads, b.attn.head_dim).transpose(0, 1) + k = k.contiguous().view(-1, bsz * b.attn.num_heads, b.attn.head_dim).transpose(0, 1) + v = v.contiguous().view(-1, bsz * b.attn.num_heads, b.attn.head_dim).transpose(0, 1) + + q = q * scaling + + attn_output_weights = torch.bmm(q, k.transpose(1, 2)) # n_heads * batch_size, tokens^2, tokens^2 + if attn_mask is not None: + + + attn_mask_type, attn_mask = attn_mask + n_heads = attn_output_weights.size(0) // attn_mask.size(0) + attn_mask = attn_mask.repeat(n_heads, 1) + + if attn_mask_type == 'cls_token': + # the mask only affects similarities compared to the readout-token. + attn_output_weights[:, 0, 1:] = attn_output_weights[:, 0, 1:] * attn_mask[None,...] + # attn_output_weights[:, 0, 0] = 0*attn_output_weights[:, 0, 0] + + if attn_mask_type == 'all': + # print(attn_output_weights.shape, attn_mask[:, None].shape) + attn_output_weights[:, 1:, 1:] = attn_output_weights[:, 1:, 1:] * attn_mask[:, None] + + + attn_output_weights = torch.softmax(attn_output_weights, dim=-1) + + attn_output = torch.bmm(attn_output_weights, v) + attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim) + attn_output = b.attn.out_proj(attn_output) + + x = x + attn_output + x = x + b.mlp(b.ln_2(x)) + + if with_aff: + return x, attn_output_weights + else: + return x + + +class CLIPDenseBase(nn.Module): + + def __init__(self, version, reduce_cond, reduce_dim, prompt, n_tokens): + super().__init__() + + import clip + + # prec = torch.FloatTensor + self.clip_model, _ = clip.load(version, device='cpu', jit=False) + self.model = self.clip_model.visual + + # if not None, scale conv weights such that we obtain n_tokens. 
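+        # (visual_forward recomputes the patch-embedding conv stride as input_size // n_tokens;
+        # illustrative example, not a default: a 352px input with n_tokens=22 gives stride 16, i.e. a 22x22 token grid)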
+ self.n_tokens = n_tokens + + for p in self.clip_model.parameters(): + p.requires_grad_(False) + + # conditional + if reduce_cond is not None: + self.reduce_cond = nn.Linear(512, reduce_cond) + for p in self.reduce_cond.parameters(): + p.requires_grad_(False) + else: + self.reduce_cond = None + + self.film_mul = nn.Linear(512 if reduce_cond is None else reduce_cond, reduce_dim) + self.film_add = nn.Linear(512 if reduce_cond is None else reduce_cond, reduce_dim) + + self.reduce = nn.Linear(768, reduce_dim) + + self.prompt_list = get_prompt_list(prompt) + + # precomputed prompts + import pickle + if isfile('precomputed_prompt_vectors.pickle'): + precomp = pickle.load(open('precomputed_prompt_vectors.pickle', 'rb')) + self.precomputed_prompts = {k: torch.from_numpy(v) for k, v in precomp.items()} + else: + self.precomputed_prompts = dict() + + def rescaled_pos_emb(self, new_size): + assert len(new_size) == 2 + + a = self.model.positional_embedding[1:].T.view(1, 768, *self.token_shape) + b = nnf.interpolate(a, new_size, mode='bicubic', align_corners=False).squeeze(0).view(768, new_size[0]*new_size[1]).T + return torch.cat([self.model.positional_embedding[:1], b]) + + def visual_forward(self, x_inp, extract_layers=(), skip=False, mask=None): + + + with torch.no_grad(): + + inp_size = x_inp.shape[2:] + + if self.n_tokens is not None: + stride2 = x_inp.shape[2] // self.n_tokens + conv_weight2 = nnf.interpolate(self.model.conv1.weight, (stride2, stride2), mode='bilinear', align_corners=True) + x = nnf.conv2d(x_inp, conv_weight2, bias=self.model.conv1.bias, stride=stride2, dilation=self.model.conv1.dilation) + else: + x = self.model.conv1(x_inp) # shape = [*, width, grid, grid] + + x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2] + x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width] + + x = torch.cat([self.model.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width] + + standard_n_tokens = 50 if self.model.conv1.kernel_size[0] == 32 else 197 + + if x.shape[1] != standard_n_tokens: + new_shape = int(math.sqrt(x.shape[1]-1)) + x = x + self.rescaled_pos_emb((new_shape, new_shape)).to(x.dtype)[None,:,:] + else: + x = x + self.model.positional_embedding.to(x.dtype) + + x = self.model.ln_pre(x) + + x = x.permute(1, 0, 2) # NLD -> LND + + activations, affinities = [], [] + for i, res_block in enumerate(self.model.transformer.resblocks): + + if mask is not None: + mask_layer, mask_type, mask_tensor = mask + if mask_layer == i or mask_layer == 'all': + # import ipdb; ipdb.set_trace() + size = int(math.sqrt(x.shape[0] - 1)) + + attn_mask = (mask_type, nnf.interpolate(mask_tensor.unsqueeze(1).float(), (size, size)).view(mask_tensor.shape[0], size * size)) + + else: + attn_mask = None + else: + attn_mask = None + + x, aff_per_head = forward_multihead_attention(x, res_block, with_aff=True, attn_mask=attn_mask) + + if i in extract_layers: + affinities += [aff_per_head] + + #if self.n_tokens is not None: + # activations += [nnf.interpolate(x, inp_size, mode='bilinear', align_corners=True)] + #else: + activations += [x] + + if len(extract_layers) > 0 and i == max(extract_layers) and skip: + print('early skip') + break + + x = x.permute(1, 0, 2) # LND -> NLD + x = self.model.ln_post(x[:, 0, :]) + + if self.model.proj is not None: + x = x @ self.model.proj + + return x, activations, affinities + + def sample_prompts(self, words, prompt_list=None): + + prompt_list = prompt_list if prompt_list is 
not None else self.prompt_list + + prompt_indices = torch.multinomial(torch.ones(len(prompt_list)), len(words), replacement=True) + prompts = [prompt_list[i] for i in prompt_indices] + return [promt.format(w) for promt, w in zip(prompts, words)] + + def get_cond_vec(self, conditional, batch_size): + # compute conditional from a single string + if conditional is not None and type(conditional) == str: + cond = self.compute_conditional(conditional) + cond = cond.repeat(batch_size, 1) + + # compute conditional from string list/tuple + elif conditional is not None and type(conditional) in {list, tuple} and type(conditional[0]) == str: + assert len(conditional) == batch_size + cond = self.compute_conditional(conditional) + + # use conditional directly + elif conditional is not None and type(conditional) == torch.Tensor and conditional.ndim == 2: + cond = conditional + + # compute conditional from image + elif conditional is not None and type(conditional) == torch.Tensor: + with torch.no_grad(): + cond, _, _ = self.visual_forward(conditional) + else: + raise ValueError('invalid conditional') + return cond + + def compute_conditional(self, conditional): + import clip + + dev = next(self.parameters()).device + + if type(conditional) in {list, tuple}: + text_tokens = clip.tokenize(conditional).to(dev) + cond = self.clip_model.encode_text(text_tokens) + else: + if conditional in self.precomputed_prompts: + cond = self.precomputed_prompts[conditional].float().to(dev) + else: + text_tokens = clip.tokenize([conditional]).to(dev) + cond = self.clip_model.encode_text(text_tokens)[0] + + if self.shift_vector is not None: + return cond + self.shift_vector + else: + return cond + + +def clip_load_untrained(version): + assert version == 'ViT-B/16' + from clip.model import CLIP + from clip.clip import _MODELS, _download + model = torch.jit.load(_download(_MODELS['ViT-B/16'])).eval() + state_dict = model.state_dict() + + vision_width = state_dict["visual.conv1.weight"].shape[0] + vision_layers = len([k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")]) + vision_patch_size = state_dict["visual.conv1.weight"].shape[-1] + grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5) + image_resolution = vision_patch_size * grid_size + embed_dim = state_dict["text_projection"].shape[1] + context_length = state_dict["positional_embedding"].shape[0] + vocab_size = state_dict["token_embedding.weight"].shape[0] + transformer_width = state_dict["ln_final.weight"].shape[0] + transformer_heads = transformer_width // 64 + transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith(f"transformer.resblocks"))) + + return CLIP(embed_dim, image_resolution, vision_layers, vision_width, vision_patch_size, + context_length, vocab_size, transformer_width, transformer_heads, transformer_layers) + + +class CLIPDensePredT(CLIPDenseBase): + + def __init__(self, version='ViT-B/32', extract_layers=(3, 6, 9), cond_layer=0, reduce_dim=128, n_heads=4, prompt='fixed', + extra_blocks=0, reduce_cond=None, fix_shift=False, + learn_trans_conv_only=False, limit_to_clip_only=False, upsample=False, + add_calibration=False, rev_activations=False, trans_conv=None, n_tokens=None): + + super().__init__(version, reduce_cond, reduce_dim, prompt, n_tokens) + # device = 'cpu' + + self.extract_layers = extract_layers + self.cond_layer = cond_layer + self.limit_to_clip_only = limit_to_clip_only + self.process_cond = None + self.rev_activations = rev_activations + + depth = 
len(extract_layers) + + if add_calibration: + self.calibration_conds = 1 + + self.upsample_proj = nn.Conv2d(reduce_dim, 1, kernel_size=1) if upsample else None + + self.add_activation1 = True + + self.version = version + + self.token_shape = {'ViT-B/32': (7, 7), 'ViT-B/16': (14, 14)}[version] + + if fix_shift: + # self.shift_vector = nn.Parameter(torch.load(join(dirname(basename(__file__)), 'clip_text_shift_vector.pth')), requires_grad=False) + self.shift_vector = nn.Parameter(torch.load(join(dirname(basename(__file__)), 'shift_text_to_vis.pth')), requires_grad=False) + # self.shift_vector = nn.Parameter(-1*torch.load(join(dirname(basename(__file__)), 'shift2.pth')), requires_grad=False) + else: + self.shift_vector = None + + if trans_conv is None: + trans_conv_ks = {'ViT-B/32': (32, 32), 'ViT-B/16': (16, 16)}[version] + else: + # explicitly define transposed conv kernel size + trans_conv_ks = (trans_conv, trans_conv) + + self.trans_conv = nn.ConvTranspose2d(reduce_dim, 1, trans_conv_ks, stride=trans_conv_ks) + + assert len(self.extract_layers) == depth + + self.reduces = nn.ModuleList([nn.Linear(768, reduce_dim) for _ in range(depth)]) + self.blocks = nn.ModuleList([nn.TransformerEncoderLayer(d_model=reduce_dim, nhead=n_heads) for _ in range(len(self.extract_layers))]) + self.extra_blocks = nn.ModuleList([nn.TransformerEncoderLayer(d_model=reduce_dim, nhead=n_heads) for _ in range(extra_blocks)]) + + # refinement and trans conv + + if learn_trans_conv_only: + for p in self.parameters(): + p.requires_grad_(False) + + for p in self.trans_conv.parameters(): + p.requires_grad_(True) + + self.prompt_list = get_prompt_list(prompt) + + + def forward(self, inp_image, conditional=None, return_features=False, mask=None): + + assert type(return_features) == bool + + inp_image = inp_image.to(self.model.positional_embedding.device) + + if mask is not None: + raise ValueError('mask not supported') + + # x_inp = normalize(inp_image) + x_inp = inp_image + + bs, dev = inp_image.shape[0], x_inp.device + + cond = self.get_cond_vec(conditional, bs) + + visual_q, activations, _ = self.visual_forward(x_inp, extract_layers=[0] + list(self.extract_layers)) + + activation1 = activations[0] + activations = activations[1:] + + _activations = activations[::-1] if not self.rev_activations else activations + + a = None + for i, (activation, block, reduce) in enumerate(zip(_activations, self.blocks, self.reduces)): + + if a is not None: + a = reduce(activation) + a + else: + a = reduce(activation) + + if i == self.cond_layer: + if self.reduce_cond is not None: + cond = self.reduce_cond(cond) + + a = self.film_mul(cond) * a + self.film_add(cond) + + a = block(a) + + for block in self.extra_blocks: + a = a + block(a) + + a = a[1:].permute(1, 2, 0) # rm cls token and -> BS, Feats, Tokens + + size = int(math.sqrt(a.shape[2])) + + a = a.view(bs, a.shape[1], size, size) + + a = self.trans_conv(a) + + if self.n_tokens is not None: + a = nnf.interpolate(a, x_inp.shape[2:], mode='bilinear', align_corners=True) + + if self.upsample_proj is not None: + a = self.upsample_proj(a) + a = nnf.interpolate(a, x_inp.shape[2:], mode='bilinear') + + if return_features: + return a, visual_q, cond, [activation1] + activations + else: + return a, + + + +class CLIPDensePredTMasked(CLIPDensePredT): + + def __init__(self, version='ViT-B/32', extract_layers=(3, 6, 9), cond_layer=0, reduce_dim=128, n_heads=4, + prompt='fixed', extra_blocks=0, reduce_cond=None, fix_shift=False, learn_trans_conv_only=False, + refine=None, limit_to_clip_only=False, 
upsample=False, add_calibration=False, n_tokens=None): + + super().__init__(version=version, extract_layers=extract_layers, cond_layer=cond_layer, reduce_dim=reduce_dim, + n_heads=n_heads, prompt=prompt, extra_blocks=extra_blocks, reduce_cond=reduce_cond, + fix_shift=fix_shift, learn_trans_conv_only=learn_trans_conv_only, + limit_to_clip_only=limit_to_clip_only, upsample=upsample, add_calibration=add_calibration, + n_tokens=n_tokens) + + def visual_forward_masked(self, img_s, seg_s): + return super().visual_forward(img_s, mask=('all', 'cls_token', seg_s)) + + def forward(self, img_q, cond_or_img_s, seg_s=None, return_features=False): + + if seg_s is None: + cond = cond_or_img_s + else: + img_s = cond_or_img_s + + with torch.no_grad(): + cond, _, _ = self.visual_forward_masked(img_s, seg_s) + + return super().forward(img_q, cond, return_features=return_features) + + + +class CLIPDenseBaseline(CLIPDenseBase): + + def __init__(self, version='ViT-B/32', cond_layer=0, + extract_layer=9, reduce_dim=128, reduce2_dim=None, prompt='fixed', + reduce_cond=None, limit_to_clip_only=False, n_tokens=None): + + super().__init__(version, reduce_cond, reduce_dim, prompt, n_tokens) + device = 'cpu' + + # self.cond_layer = cond_layer + self.extract_layer = extract_layer + self.limit_to_clip_only = limit_to_clip_only + self.shift_vector = None + + self.token_shape = {'ViT-B/32': (7, 7), 'ViT-B/16': (14, 14)}[version] + + assert reduce2_dim is not None + + self.reduce2 = nn.Sequential( + nn.Linear(reduce_dim, reduce2_dim), + nn.ReLU(), + nn.Linear(reduce2_dim, reduce_dim) + ) + + trans_conv_ks = {'ViT-B/32': (32, 32), 'ViT-B/16': (16, 16)}[version] + self.trans_conv = nn.ConvTranspose2d(reduce_dim, 1, trans_conv_ks, stride=trans_conv_ks) + + + def forward(self, inp_image, conditional=None, return_features=False): + + inp_image = inp_image.to(self.model.positional_embedding.device) + + # x_inp = normalize(inp_image) + x_inp = inp_image + + bs, dev = inp_image.shape[0], x_inp.device + + cond = self.get_cond_vec(conditional, bs) + + visual_q, activations, affinities = self.visual_forward(x_inp, extract_layers=[self.extract_layer]) + + a = activations[0] + a = self.reduce(a) + a = self.film_mul(cond) * a + self.film_add(cond) + + if self.reduce2 is not None: + a = self.reduce2(a) + + # the original model would execute a transformer block here + + a = a[1:].permute(1, 2, 0) # rm cls token and -> BS, Feats, Tokens + + size = int(math.sqrt(a.shape[2])) + + a = a.view(bs, a.shape[1], size, size) + a = self.trans_conv(a) + + if return_features: + return a, visual_q, cond, activations + else: + return a, + + +class CLIPSegMultiLabel(nn.Module): + + def __init__(self, model) -> None: + super().__init__() + + from third_party.JoEm.data_loader import get_seen_idx, get_unseen_idx, VOC + + self.pascal_classes = VOC + + from models.clipseg import CLIPDensePredT + from general_utils import load_model + # self.clipseg = load_model('rd64-vit16-neg0.2-phrasecut', strict=False) + self.clipseg = load_model(model, strict=False) + + self.clipseg.eval() + + def forward(self, x): + + bs = x.shape[0] + out = torch.ones(21, bs, 352, 352).to(x.device) * -10 + + for class_id, class_name in enumerate(self.pascal_classes): + + fac = 3 if class_name == 'background' else 1 + + with torch.no_grad(): + pred = torch.sigmoid(self.clipseg(x, class_name)[0][:,0]) * fac + + out[class_id] += pred + + + out = out.permute(1, 0, 2, 3) + + return out + + # construct output tensor + \ No newline at end of file diff --git 
a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/models/vitseg.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/models/vitseg.py new file mode 100644 index 0000000000000000000000000000000000000000..ed621431ddf930fcfa27b5929999776b96fede63 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/models/vitseg.py @@ -0,0 +1,286 @@ +import math +from posixpath import basename, dirname, join +# import clip +from clip.model import convert_weights +import torch +import json +from torch import nn +from torch.nn import functional as nnf +from torch.nn.modules import activation +from torch.nn.modules.activation import ReLU +from torchvision import transforms + +normalize = transforms.Normalize(mean=(0.48145466, 0.4578275, 0.40821073), std=(0.26862954, 0.26130258, 0.27577711)) + +from torchvision.models import ResNet + + +def process_prompts(conditional, prompt_list, conditional_map): + # DEPRECATED + + # randomly sample a synonym + words = [conditional_map[int(i)] for i in conditional] + words = [syns[torch.multinomial(torch.ones(len(syns)), 1, replacement=True).item()] for syns in words] + words = [w.replace('_', ' ') for w in words] + + if prompt_list is not None: + prompt_indices = torch.multinomial(torch.ones(len(prompt_list)), len(words), replacement=True) + prompts = [prompt_list[i] for i in prompt_indices] + else: + prompts = ['a photo of {}'] * (len(words)) + + return [promt.format(w) for promt, w in zip(prompts, words)] + + +class VITDenseBase(nn.Module): + + def rescaled_pos_emb(self, new_size): + assert len(new_size) == 2 + + a = self.model.positional_embedding[1:].T.view(1, 768, *self.token_shape) + b = nnf.interpolate(a, new_size, mode='bicubic', align_corners=False).squeeze(0).view(768, new_size[0]*new_size[1]).T + return torch.cat([self.model.positional_embedding[:1], b]) + + def visual_forward(self, x_inp, extract_layers=(), skip=False, mask=None): + + with torch.no_grad(): + + x_inp = nnf.interpolate(x_inp, (384, 384)) + + x = self.model.patch_embed(x_inp) + cls_token = self.model.cls_token.expand(x.shape[0], -1, -1) # stole cls_tokens impl from Phil Wang, thanks + if self.model.dist_token is None: + x = torch.cat((cls_token, x), dim=1) + else: + x = torch.cat((cls_token, self.model.dist_token.expand(x.shape[0], -1, -1), x), dim=1) + x = self.model.pos_drop(x + self.model.pos_embed) + + activations = [] + for i, block in enumerate(self.model.blocks): + x = block(x) + + if i in extract_layers: + # permute to be compatible with CLIP + activations += [x.permute(1,0,2)] + + x = self.model.norm(x) + x = self.model.head(self.model.pre_logits(x[:, 0])) + + # again for CLIP compatibility + # x = x.permute(1, 0, 2) + + return x, activations, None + + def sample_prompts(self, words, prompt_list=None): + + prompt_list = prompt_list if prompt_list is not None else self.prompt_list + + prompt_indices = torch.multinomial(torch.ones(len(prompt_list)), len(words), replacement=True) + prompts = [prompt_list[i] for i in prompt_indices] + return [promt.format(w) for promt, w in zip(prompts, words)] + + def get_cond_vec(self, conditional, batch_size): + # compute conditional from a single string + if conditional is not None and type(conditional) == str: + cond = self.compute_conditional(conditional) + cond = cond.repeat(batch_size, 1) + + # compute conditional from string list/tuple + elif conditional is not None and type(conditional) in {list, tuple} and type(conditional[0]) == str: + assert len(conditional) == 
batch_size + cond = self.compute_conditional(conditional) + + # use conditional directly + elif conditional is not None and type(conditional) == torch.Tensor and conditional.ndim == 2: + cond = conditional + + # compute conditional from image + elif conditional is not None and type(conditional) == torch.Tensor: + with torch.no_grad(): + cond, _, _ = self.visual_forward(conditional) + else: + raise ValueError('invalid conditional') + return cond + + def compute_conditional(self, conditional): + import clip + + dev = next(self.parameters()).device + + if type(conditional) in {list, tuple}: + text_tokens = clip.tokenize(conditional).to(dev) + cond = self.clip_model.encode_text(text_tokens) + else: + if conditional in self.precomputed_prompts: + cond = self.precomputed_prompts[conditional].float().to(dev) + else: + text_tokens = clip.tokenize([conditional]).to(dev) + cond = self.clip_model.encode_text(text_tokens)[0] + + return cond + + +class VITDensePredT(VITDenseBase): + + def __init__(self, extract_layers=(3, 6, 9), cond_layer=0, reduce_dim=128, n_heads=4, prompt='fixed', + depth=3, extra_blocks=0, reduce_cond=None, fix_shift=False, + learn_trans_conv_only=False, refine=None, limit_to_clip_only=False, upsample=False, + add_calibration=False, process_cond=None, not_pretrained=False): + super().__init__() + # device = 'cpu' + + self.extract_layers = extract_layers + self.cond_layer = cond_layer + self.limit_to_clip_only = limit_to_clip_only + self.process_cond = None + + if add_calibration: + self.calibration_conds = 1 + + self.upsample_proj = nn.Conv2d(reduce_dim, 1, kernel_size=1) if upsample else None + + self.add_activation1 = True + + import timm + self.model = timm.create_model('vit_base_patch16_384', pretrained=True) + self.model.head = nn.Linear(768, 512 if reduce_cond is None else reduce_cond) + + for p in self.model.parameters(): + p.requires_grad_(False) + + import clip + self.clip_model, _ = clip.load('ViT-B/16', device='cpu', jit=False) + # del self.clip_model.visual + + + self.token_shape = (14, 14) + + # conditional + if reduce_cond is not None: + self.reduce_cond = nn.Linear(512, reduce_cond) + for p in self.reduce_cond.parameters(): + p.requires_grad_(False) + else: + self.reduce_cond = None + + # self.film = AVAILABLE_BLOCKS['film'](512, 128) + self.film_mul = nn.Linear(512 if reduce_cond is None else reduce_cond, reduce_dim) + self.film_add = nn.Linear(512 if reduce_cond is None else reduce_cond, reduce_dim) + + # DEPRECATED + # self.conditional_map = {c['id']: c['synonyms'] for c in json.load(open(cond_map))} + + assert len(self.extract_layers) == depth + + self.reduces = nn.ModuleList([nn.Linear(768, reduce_dim) for _ in range(depth)]) + self.blocks = nn.ModuleList([nn.TransformerEncoderLayer(d_model=reduce_dim, nhead=n_heads) for _ in range(len(self.extract_layers))]) + self.extra_blocks = nn.ModuleList([nn.TransformerEncoderLayer(d_model=reduce_dim, nhead=n_heads) for _ in range(extra_blocks)]) + + trans_conv_ks = (16, 16) + self.trans_conv = nn.ConvTranspose2d(reduce_dim, 1, trans_conv_ks, stride=trans_conv_ks) + + # refinement and trans conv + + if learn_trans_conv_only: + for p in self.parameters(): + p.requires_grad_(False) + + for p in self.trans_conv.parameters(): + p.requires_grad_(True) + + if prompt == 'fixed': + self.prompt_list = ['a photo of a {}.'] + elif prompt == 'shuffle': + self.prompt_list = ['a photo of a {}.', 'a photograph of a {}.', 'an image of a {}.', '{}.'] + elif prompt == 'shuffle+': + self.prompt_list = ['a photo of a {}.', 'a photograph of 
a {}.', 'an image of a {}.', '{}.', + 'a cropped photo of a {}.', 'a good photo of a {}.', 'a photo of one {}.', + 'a bad photo of a {}.', 'a photo of the {}.'] + elif prompt == 'shuffle_clip': + from models.clip_prompts import imagenet_templates + self.prompt_list = imagenet_templates + + if process_cond is not None: + if process_cond == 'clamp' or process_cond[0] == 'clamp': + + val = process_cond[1] if type(process_cond) in {list, tuple} else 0.2 + + def clamp_vec(x): + return torch.clamp(x, -val, val) + + self.process_cond = clamp_vec + + elif process_cond.endswith('.pth'): + + shift = torch.load(process_cond) + def add_shift(x): + return x + shift.to(x.device) + + self.process_cond = add_shift + + import pickle + precomp = pickle.load(open('precomputed_prompt_vectors.pickle', 'rb')) + self.precomputed_prompts = {k: torch.from_numpy(v) for k, v in precomp.items()} + + + def forward(self, inp_image, conditional=None, return_features=False, mask=None): + + assert type(return_features) == bool + + # inp_image = inp_image.to(self.model.positional_embedding.device) + + if mask is not None: + raise ValueError('mask not supported') + + # x_inp = normalize(inp_image) + x_inp = inp_image + + bs, dev = inp_image.shape[0], x_inp.device + + inp_image_size = inp_image.shape[2:] + + cond = self.get_cond_vec(conditional, bs) + + visual_q, activations, _ = self.visual_forward(x_inp, extract_layers=[0] + list(self.extract_layers)) + + activation1 = activations[0] + activations = activations[1:] + + a = None + for i, (activation, block, reduce) in enumerate(zip(activations[::-1], self.blocks, self.reduces)): + + if a is not None: + a = reduce(activation) + a + else: + a = reduce(activation) + + if i == self.cond_layer: + if self.reduce_cond is not None: + cond = self.reduce_cond(cond) + + a = self.film_mul(cond) * a + self.film_add(cond) + + a = block(a) + + for block in self.extra_blocks: + a = a + block(a) + + a = a[1:].permute(1, 2, 0) # rm cls token and -> BS, Feats, Tokens + + size = int(math.sqrt(a.shape[2])) + + a = a.view(bs, a.shape[1], size, size) + + if self.trans_conv is not None: + a = self.trans_conv(a) + + if self.upsample_proj is not None: + a = self.upsample_proj(a) + a = nnf.interpolate(a, x_inp.shape[2:], mode='bilinear') + + a = nnf.interpolate(a, inp_image_size) + + if return_features: + return a, visual_q, cond, [activation1] + activations + else: + return a, diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/overview.png b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/overview.png new file mode 100644 index 0000000000000000000000000000000000000000..1f77bc7743746108fa34bfbaa6e9e8c4db213c8c --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/overview.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c46050cecb3068b9b4b263458b5d0da154a95a7a47b8ce312ba402f0dda3cbfe +size 53964 diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/score.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/score.py new file mode 100644 index 0000000000000000000000000000000000000000..8db8915b109953931fa2a330a7731db4a51b44f8 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/score.py @@ -0,0 +1,453 @@ +from torch.functional import Tensor + +import torch +import inspect +import json +import yaml +import time +import sys + +from general_utils import log + +import numpy as np +from os.path 
import expanduser, join, isfile, realpath + +from torch.utils.data import DataLoader + +from metrics import FixedIntervalMetrics + +from general_utils import load_model, log, score_config_from_cli_args, AttributeDict, get_attribute, filter_args + + +DATASET_CACHE = dict() + +def load_model(checkpoint_id, weights_file=None, strict=True, model_args='from_config', with_config=False, ignore_weights=False): + + config = json.load(open(join('logs', checkpoint_id, 'config.json'))) + + if model_args != 'from_config' and type(model_args) != dict: + raise ValueError('model_args must either be "from_config" or a dictionary of values') + + model_cls = get_attribute(config['model']) + + # load model + if model_args == 'from_config': + _, model_args, _ = filter_args(config, inspect.signature(model_cls).parameters) + + model = model_cls(**model_args) + + if weights_file is None: + weights_file = realpath(join('logs', checkpoint_id, 'weights.pth')) + else: + weights_file = realpath(join('logs', checkpoint_id, weights_file)) + + if isfile(weights_file) and not ignore_weights: + weights = torch.load(weights_file) + for _, w in weights.items(): + assert not torch.any(torch.isnan(w)), 'weights contain NaNs' + model.load_state_dict(weights, strict=strict) + else: + if not ignore_weights: + raise FileNotFoundError(f'model checkpoint {weights_file} was not found') + + if with_config: + return model, config + + return model + + +def compute_shift2(model, datasets, seed=123, repetitions=1): + """ computes shift """ + + model.eval() + model.cuda() + + import random + random.seed(seed) + + preds, gts = [], [] + for i_dataset, dataset in enumerate(datasets): + + loader = DataLoader(dataset, batch_size=1, num_workers=0, shuffle=False, drop_last=False) + + max_iterations = int(repetitions * len(dataset.dataset.data_list)) + + with torch.no_grad(): + + i, losses = 0, [] + for i_all, (data_x, data_y) in enumerate(loader): + + data_x = [v.cuda(non_blocking=True) if v is not None else v for v in data_x] + data_y = [v.cuda(non_blocking=True) if v is not None else v for v in data_y] + + pred, = model(data_x[0], data_x[1], data_x[2]) + preds += [pred.detach()] + gts += [data_y] + + i += 1 + if max_iterations and i >= max_iterations: + break + + from metrics import FixedIntervalMetrics + n_values = 51 + thresholds = np.linspace(0, 1, n_values)[1:-1] + metric = FixedIntervalMetrics(resize_pred=True, sigmoid=True, n_values=n_values) + + for p, y in zip(preds, gts): + metric.add(p.unsqueeze(1), y) + + best_idx = np.argmax(metric.value()['fgiou_scores']) + best_thresh = thresholds[best_idx] + + return best_thresh + + +def get_cached_pascal_pfe(split, config): + from datasets.pfe_dataset import PFEPascalWrapper + try: + dataset = DATASET_CACHE[(split, config.image_size, config.label_support, config.mask)] + except KeyError: + dataset = PFEPascalWrapper(mode='val', split=split, mask=config.mask, image_size=config.image_size, label_support=config.label_support) + DATASET_CACHE[(split, config.image_size, config.label_support, config.mask)] = dataset + return dataset + + + + +def main(): + config, train_checkpoint_id = score_config_from_cli_args() + + metrics = score(config, train_checkpoint_id, None) + + for dataset in metrics.keys(): + for k in metrics[dataset]: + if type(metrics[dataset][k]) in {float, int}: + print(dataset, f'{k:<16} {metrics[dataset][k]:.3f}') + + +def score(config, train_checkpoint_id, train_config): + + config = AttributeDict(config) + + print(config) + + # use training dataset and loss + train_config = 
AttributeDict(json.load(open(f'logs/{train_checkpoint_id}/config.json'))) + + cp_str = f'_{config.iteration_cp}' if config.iteration_cp is not None else '' + + + model_cls = get_attribute(train_config['model']) + + _, model_args, _ = filter_args(train_config, inspect.signature(model_cls).parameters) + + model_args = {**model_args, **{k: config[k] for k in ['process_cond', 'fix_shift'] if k in config}} + + strict_models = {'ConditionBase4', 'PFENetWrapper'} + model = load_model(train_checkpoint_id, strict=model_cls.__name__ in strict_models, model_args=model_args, + weights_file=f'weights{cp_str}.pth', ) + + + model.eval() + model.cuda() + + metric_args = dict() + + if 'threshold' in config: + if config.metric.split('.')[-1] == 'SkLearnMetrics': + metric_args['threshold'] = config.threshold + + if 'resize_to' in config: + metric_args['resize_to'] = config.resize_to + + if 'sigmoid' in config: + metric_args['sigmoid'] = config.sigmoid + + if 'custom_threshold' in config: + metric_args['custom_threshold'] = config.custom_threshold + + if config.test_dataset == 'pascal': + + loss_fn = get_attribute(train_config.loss) + # assume that if no split is specified in train_config, test on all splits, + + if 'splits' in config: + splits = config.splits + else: + if 'split' in train_config and type(train_config.split) == int: + # unless train_config has a split set, in that case assume train mode in training + splits = [train_config.split] + assert train_config.mode == 'train' + else: + splits = [0,1,2,3] + + log.info('Test on these splits', splits) + + scores = dict() + for split in splits: + + shift = config.shift if 'shift' in config else 0 + + # automatic shift + if shift == 'auto': + shift_compute_t = time.time() + shift = compute_shift2(model, [get_cached_pascal_pfe(s, config) for s in range(4) if s != split], repetitions=config.compute_shift_fac) + log.info(f'Best threshold is {shift}, computed on splits: {[s for s in range(4) if s != split]}, took {time.time() - shift_compute_t:.1f}s') + + dataset = get_cached_pascal_pfe(split, config) + + eval_start_t = time.time() + + loader = DataLoader(dataset, batch_size=1, num_workers=0, shuffle=False, drop_last=False) + + assert config.batch_size is None or config.batch_size == 1, 'When PFE Dataset is used, batch size must be 1' + + metric = FixedIntervalMetrics(resize_pred=True, sigmoid=True, custom_threshold=shift, **metric_args) + + with torch.no_grad(): + + i, losses = 0, [] + for i_all, (data_x, data_y) in enumerate(loader): + + data_x = [v.cuda(non_blocking=True) if isinstance(v, torch.Tensor) else v for v in data_x] + data_y = [v.cuda(non_blocking=True) if isinstance(v, torch.Tensor) else v for v in data_y] + + if config.mask == 'separate': # for old CondBase model + pred, = model(data_x[0], data_x[1], data_x[2]) + else: + # assert config.mask in {'text', 'highlight'} + pred, _, _, _ = model(data_x[0], data_x[1], return_features=True) + + # loss = loss_fn(pred, data_y[0]) + metric.add(pred.unsqueeze(1) + shift, data_y) + + # losses += [float(loss)] + + i += 1 + if config.max_iterations and i >= config.max_iterations: + break + + #scores[split] = {m: s for m, s in zip(metric.names(), metric.value())} + + log.info(f'Dataset length: {len(dataset)}, took {time.time() - eval_start_t:.1f}s to evaluate.') + + print(metric.value()['mean_iou_scores']) + + scores[split] = metric.scores() + + log.info(f'Completed split {split}') + + key_prefix = config['name'] if 'name' in config else 'pas' + + all_keys = set.intersection(*[set(v.keys()) for v in 
scores.values()]) + + valid_keys = [k for k in all_keys if all(v[k] is not None and isinstance(v[k], (int, float, np.float)) for v in scores.values())] + + return {key_prefix: {k: np.mean([s[k] for s in scores.values()]) for k in valid_keys}} + + + if config.test_dataset == 'coco': + from datasets.coco_wrapper import COCOWrapper + + coco_dataset = COCOWrapper('test', fold=train_config.fold, image_size=train_config.image_size, mask=config.mask, + with_class_label=True) + + log.info('Dataset length', len(coco_dataset)) + loader = DataLoader(coco_dataset, batch_size=config.batch_size, num_workers=2, shuffle=False, drop_last=False) + + metric = get_attribute(config.metric)(resize_pred=True, **metric_args) + + shift = config.shift if 'shift' in config else 0 + + with torch.no_grad(): + + i, losses = 0, [] + for i_all, (data_x, data_y) in enumerate(loader): + data_x = [v.cuda(non_blocking=True) if isinstance(v, torch.Tensor) else v for v in data_x] + data_y = [v.cuda(non_blocking=True) if isinstance(v, torch.Tensor) else v for v in data_y] + + if config.mask == 'separate': # for old CondBase model + pred, = model(data_x[0], data_x[1], data_x[2]) + else: + # assert config.mask in {'text', 'highlight'} + pred, _, _, _ = model(data_x[0], data_x[1], return_features=True) + + metric.add([pred + shift], data_y) + + i += 1 + if config.max_iterations and i >= config.max_iterations: + break + + key_prefix = config['name'] if 'name' in config else 'coco' + return {key_prefix: metric.scores()} + #return {key_prefix: {k: v for k, v in zip(metric.names(), metric.value())}} + + + if config.test_dataset == 'phrasecut': + from datasets.phrasecut import PhraseCut + + only_visual = config.only_visual is not None and config.only_visual + with_visual = config.with_visual is not None and config.with_visual + + dataset = PhraseCut('test', + image_size=train_config.image_size, + mask=config.mask, + with_visual=with_visual, only_visual=only_visual, aug_crop=False, + aug_color=False) + + loader = DataLoader(dataset, batch_size=config.batch_size, num_workers=2, shuffle=False, drop_last=False) + metric = get_attribute(config.metric)(resize_pred=True, **metric_args) + + shift = config.shift if 'shift' in config else 0 + + + with torch.no_grad(): + + i, losses = 0, [] + for i_all, (data_x, data_y) in enumerate(loader): + data_x = [v.cuda(non_blocking=True) if isinstance(v, torch.Tensor) else v for v in data_x] + data_y = [v.cuda(non_blocking=True) if isinstance(v, torch.Tensor) else v for v in data_y] + + pred, _, _, _ = model(data_x[0], data_x[1], return_features=True) + metric.add([pred + shift], data_y) + + i += 1 + if config.max_iterations and i >= config.max_iterations: + break + + key_prefix = config['name'] if 'name' in config else 'phrasecut' + return {key_prefix: metric.scores()} + #return {key_prefix: {k: v for k, v in zip(metric.names(), metric.value())}} + + if config.test_dataset == 'pascal_zs': + from third_party.JoEm.model.metric import Evaluator + from third_party.JoEm.data_loader import get_seen_idx, get_unseen_idx, VOC + from datasets.pascal_zeroshot import PascalZeroShot, PASCAL_VOC_CLASSES_ZS + + from models.clipseg import CLIPSegMultiLabel + + n_unseen = train_config.remove_classes[1] + + pz = PascalZeroShot('val', n_unseen, image_size=352) + m = CLIPSegMultiLabel(model=train_config.name).cuda() + m.eval(); + + print(len(pz), n_unseen) + print('training removed', [c for class_set in PASCAL_VOC_CLASSES_ZS[:n_unseen // 2] for c in class_set]) + + print('unseen', [VOC[i] for i in get_unseen_idx(n_unseen)]) 
+ print('seen', [VOC[i] for i in get_seen_idx(n_unseen)]) + + loader = DataLoader(pz, batch_size=8) + evaluator = Evaluator(21, get_unseen_idx(n_unseen), get_seen_idx(n_unseen)) + + for i, (data_x, data_y) in enumerate(loader): + pred = m(data_x[0].cuda()) + evaluator.add_batch(data_y[0].numpy(), pred.argmax(1).cpu().detach().numpy()) + + if config.max_iter is not None and i > config.max_iter: + break + + scores = evaluator.Mean_Intersection_over_Union() + key_prefix = config['name'] if 'name' in config else 'pas_zs' + + return {key_prefix: {k: scores[k] for k in ['seen', 'unseen', 'harmonic', 'overall']}} + + elif config.test_dataset in {'same_as_training', 'affordance'}: + loss_fn = get_attribute(train_config.loss) + + metric_cls = get_attribute(config.metric) + metric = metric_cls(**metric_args) + + if config.test_dataset == 'same_as_training': + dataset_cls = get_attribute(train_config.dataset) + elif config.test_dataset == 'affordance': + dataset_cls = get_attribute('datasets.lvis_oneshot3.LVIS_Affordance') + dataset_name = 'aff' + else: + dataset_cls = get_attribute('datasets.lvis_oneshot3.LVIS_OneShot') + dataset_name = 'lvis' + + _, dataset_args, _ = filter_args(config, inspect.signature(dataset_cls).parameters) + + dataset_args['image_size'] = train_config.image_size # explicitly use training image size for evaluation + + if model.__class__.__name__ == 'PFENetWrapper': + dataset_args['image_size'] = config.image_size + + log.info('init dataset', str(dataset_cls)) + dataset = dataset_cls(**dataset_args) + + log.info(f'Score on {model.__class__.__name__} on {dataset_cls.__name__}') + + data_loader = torch.utils.data.DataLoader(dataset, batch_size=config.batch_size, shuffle=config.shuffle) + + # explicitly set prompts + if config.prompt == 'plain': + model.prompt_list = ['{}'] + elif config.prompt == 'fixed': + model.prompt_list = ['a photo of a {}.'] + elif config.prompt == 'shuffle': + model.prompt_list = ['a photo of a {}.', 'a photograph of a {}.', 'an image of a {}.', '{}.'] + elif config.prompt == 'shuffle_clip': + from models.clip_prompts import imagenet_templates + model.prompt_list = imagenet_templates + + config.assume_no_unused_keys(exceptions=['max_iterations']) + + t_start = time.time() + + with torch.no_grad(): # TODO: switch to inference_mode (torch 1.9) + i, losses = 0, [] + for data_x, data_y in data_loader: + + data_x = [x.cuda() if isinstance(x, torch.Tensor) else x for x in data_x] + data_y = [x.cuda() if isinstance(x, torch.Tensor) else x for x in data_y] + + if model.__class__.__name__ in {'ConditionBase4', 'PFENetWrapper'}: + pred, = model(data_x[0], data_x[1], data_x[2]) + visual_q = None + else: + pred, visual_q, _, _ = model(data_x[0], data_x[1], return_features=True) + + loss = loss_fn(pred, data_y[0]) + + metric.add([pred], data_y) + + losses += [float(loss)] + + i += 1 + if config.max_iterations and i >= config.max_iterations: + break + + # scores = {m: s for m, s in zip(metric.names(), metric.value())} + scores = metric.scores() + + keys = set(scores.keys()) + if dataset.negative_prob > 0 and 'mIoU' in keys: + keys.remove('mIoU') + + name_mask = dataset.mask.replace('text_label', 'txt')[:3] + name_neg = '' if dataset.negative_prob == 0 else '_' + str(dataset.negative_prob) + + score_name = config.name if 'name' in config else f'{dataset_name}_{name_mask}{name_neg}' + + scores = {score_name: {k: v for k,v in scores.items() if k in keys}} + scores[score_name].update({'test_loss': np.mean(losses)}) + + log.info(f'Evaluation took {time.time() - 
t_start:.1f}s') + + return scores + else: + raise ValueError('invalid test dataset') + + + + + + + + + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/setup.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..2bf28ffe269cba3033af263db5f98313772818f0 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/setup.py @@ -0,0 +1,30 @@ +from setuptools import setup + +with open("README.md", "r", encoding="utf-8") as readme_file: + readme = readme_file.read() + +requirements = [ + "numpy", + "scipy", + "matplotlib", + "torch", + "torchvision", + "opencv-python", + "CLIP @ git+https://github.com/openai/CLIP.git" +] + +setup( + name='clipseg', + packages=['clipseg'], + package_dir={'clipseg': 'models'}, + package_data={'clipseg': [ + "../weights/*.pth", + ]}, + version='0.0.1', + url='https://github.com/timojl/clipseg', + python_requires='>=3.9', + install_requires=requirements, + description='This repository contains the code used in the paper "Image Segmentation Using Text and Image Prompts".', + long_description=readme, + long_description_content_type="text/markdown", +) diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/training.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/training.py new file mode 100644 index 0000000000000000000000000000000000000000..ce12cf443f37e2520658614e15d0e64eb554b7f1 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/training.py @@ -0,0 +1,266 @@ +import torch +import inspect +import json +import yaml +import math +import os +import sys + +from general_utils import log + +import numpy as np +from functools import partial +from os.path import expanduser, join, isfile, basename + +from torch.cuda.amp import autocast, GradScaler +from torch.optim.lr_scheduler import LambdaLR +from contextlib import nullcontext +from torch.utils.data import DataLoader + +from general_utils import TrainingLogger, get_attribute, filter_args, log, training_config_from_cli_args + + +def cosine_warmup_lr(i, warmup=10, max_iter=90): + """ Cosine LR with Warmup """ + if i < warmup: + return (i+1)/(warmup+1) + else: + return 0.5 + 0.5*math.cos(math.pi*(((i-warmup)/(max_iter- warmup)))) + + +def validate(model, dataset, config): + data_loader = torch.utils.data.DataLoader(dataset, batch_size=4, shuffle=False) + + metric_class, use_metric = config.val_metric_class, config.use_val_metric + loss_fn = get_attribute(config.loss) + + model.eval() + model.cuda() + + if metric_class is not None: + metric = get_attribute(metric_class)() + + with torch.no_grad(): + + i, losses = 0, [] + for data_x, data_y in data_loader: + + data_x = [x.cuda() if isinstance(x, torch.Tensor) else x for x in data_x] + data_y = [x.cuda() if isinstance(x, torch.Tensor) else x for x in data_y] + + prompts = model.sample_prompts(data_x[1], prompt_list=('a photo of a {}',)) + pred, visual_q, _, _ = model(data_x[0], prompts, return_features=True) + + if metric_class is not None: + metric.add([pred], data_y) + + # pred = model(data_x[0], prompts) + # loss = loss_fn(pred[0], data_y[0]) + loss = loss_fn(pred, data_y[0]) + losses += [float(loss)] + + i += 1 + + if config.val_max_iterations is not None and i > config.val_max_iterations: + break + + if use_metric is None: + return np.mean(losses), 
{}, False + else: + metric_scores = {m: s for m, s in zip(metric.names(), metric.value())} if metric is not None else {} + return np.mean(losses), metric_scores, True + + +def main(): + + config = training_config_from_cli_args() + + val_interval, best_val_loss, best_val_score = config.val_interval, float('inf'), float('-inf') + + model_cls = get_attribute(config.model) + _, model_args, _ = filter_args(config, inspect.signature(model_cls).parameters) + model = model_cls(**model_args).cuda() + + dataset_cls = get_attribute(config.dataset) + _, dataset_args, _ = filter_args(config, inspect.signature(dataset_cls).parameters) + + dataset = dataset_cls(**dataset_args) + + log.info(f'Train dataset {dataset.__class__.__name__} (length: {len(dataset)})') + + if val_interval is not None: + dataset_val_args = {k[4:]: v for k,v in config.items() if k.startswith('val_') and k != 'val_interval'} + _, dataset_val_args, _ = filter_args(dataset_val_args, inspect.signature(dataset_cls).parameters) + print('val args', {**dataset_args, **{'split': 'val', 'aug': 0}, **dataset_val_args}) + + dataset_val = dataset_cls(**{**dataset_args, **{'split': 'val', 'aug': 0}, **dataset_val_args}) + + # optimizer + opt_cls = get_attribute(config.optimizer) + if config.optimizer == 'torch.optim.SGD': + opt_args = {'momentum': config.momentum if 'momentum' in config else 0} + else: + opt_args = {} + opt = opt_cls(model.parameters(), lr=config.lr, **opt_args) + + if config.lr_scheduler == 'cosine': + assert config.T_max is not None and config.eta_min is not None + lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(opt, config.T_max, config.eta_min) + elif config.lr_scheduler == 'warmup_cosine': + lr_scheduler = LambdaLR(opt, partial(cosine_warmup_lr, max_iter=(config.max_iterations), warmup=config.warmup)) + else: + lr_scheduler = None + + batch_size, max_iterations = config.batch_size, config.max_iterations + + loss_fn = get_attribute(config.loss) + + if config.amp: + log.info('Using AMP') + autocast_fn = autocast + scaler = GradScaler() + else: + autocast_fn, scaler = nullcontext, None + + + save_only_trainable = True + data_loader = DataLoader(dataset, batch_size=batch_size, num_workers=4) + + # disable config when hyperparam. opt. to avoid writing logs. + tracker_config = config if not config.hyperparameter_optimization else None + + with TrainingLogger(log_dir=config.name, model=model, config=tracker_config) as logger: + + i = 0 + while True: + for data_x, data_y in data_loader: + + # between caption and output feature. + # 1. Sample random captions + # 2. 
Check alignment with CLIP + + # randomly mix text and visual support conditionals + if config.mix: + + assert config.mask.startswith('text_and') + + with autocast_fn(): + # data_x[1] = text label + prompts = model.sample_prompts(data_x[1]) + + # model.clip_model() + + text_cond = model.compute_conditional(prompts) + if model.__class__.__name__ == 'CLIPDensePredTMasked': + # when mask=='separate' + visual_s_cond, _, _ = model.visual_forward_masked(data_x[2].cuda(), data_x[3].cuda()) + else: + # data_x[2] = visual prompt + visual_s_cond, _, _ = model.visual_forward(data_x[2].cuda()) + + max_txt = config.mix_text_max if config.mix_text_max is not None else 1 + batch_size = text_cond.shape[0] + + # sample weights for each element in batch + text_weights = torch.distributions.Uniform(config.mix_text_min, max_txt).sample((batch_size,))[:, None] + text_weights = text_weights.cuda() + + if dataset.__class__.__name__ == 'PhraseCut': + # give full weight to text where support_image is invalid + visual_is_valid = data_x[4] if model.__class__.__name__ == 'CLIPDensePredTMasked' else data_x[3] + text_weights = torch.max(text_weights[:,0], 1 - visual_is_valid.float().cuda()).unsqueeze(1) + + cond = text_cond * text_weights + visual_s_cond * (1 - text_weights) + + else: + # no mix + + if model.__class__.__name__ == 'CLIPDensePredTMasked': + # compute conditional vector using CLIP masking + with autocast_fn(): + assert config.mask == 'separate' + cond, _, _ = model.visual_forward_masked(data_x[1].cuda(), data_x[2].cuda()) + else: + cond = data_x[1] + if isinstance(cond, torch.Tensor): + cond = cond.cuda() + + with autocast_fn(): + visual_q = None + + pred, visual_q, _, _ = model(data_x[0].cuda(), cond, return_features=True) + + loss = loss_fn(pred, data_y[0].cuda()) + + if torch.isnan(loss) or torch.isinf(loss): + # skip if loss is nan + log.warning('Training stopped due to inf/nan loss.') + sys.exit(-1) + + extra_loss = 0 + loss += extra_loss + + opt.zero_grad() + + if scaler is None: + loss.backward() + opt.step() + else: + scaler.scale(loss).backward() + scaler.step(opt) + scaler.update() + + if lr_scheduler is not None: + lr_scheduler.step() + if i % 2000 == 0: + current_lr = [g['lr'] for g in opt.param_groups][0] + log.info(f'current lr: {current_lr:.5f} ({len(opt.param_groups)} parameter groups)') + + logger.iter(i=i, loss=loss) + i += 1 + + if i >= max_iterations: + + if not isfile(join(logger.base_path, 'weights.pth')): + # only write if no weights were already written + logger.save_weights(only_trainable=save_only_trainable) + + sys.exit(0) + + + if config.checkpoint_iterations is not None and i in config.checkpoint_iterations: + logger.save_weights(only_trainable=save_only_trainable, weight_file=f'weights_{i}.pth') + + + if val_interval is not None and i % val_interval == val_interval - 1: + + val_loss, val_scores, maximize = validate(model, dataset_val, config) + + if len(val_scores) > 0: + + score_str = f', scores: ' + ', '.join(f'{k}: {v}' for k, v in val_scores.items()) + + if maximize and val_scores[config.use_val_metric] > best_val_score: + logger.save_weights(only_trainable=save_only_trainable) + best_val_score = val_scores[config.use_val_metric] + + elif not maximize and val_scores[config.use_val_metric] < best_val_score: + logger.save_weights(only_trainable=save_only_trainable) + best_val_score = val_scores[config.use_val_metric] + + else: + score_str = '' + # if no score is used, fall back to loss + if val_loss < best_val_loss: + logger.save_weights(only_trainable=save_only_trainable) + 
best_val_loss = val_loss + + log.info(f'Validation loss: {val_loss}' + score_str) + logger.iter(i=i, val_loss=val_loss, extra_loss=float(extra_loss), **val_scores) + model.train() + + print('epoch complete') + + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/weights/rd64-uni.pth b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/weights/rd64-uni.pth new file mode 100644 index 0000000000000000000000000000000000000000..375ca251b1e5f598254c4c49b0ee0d07a88ebdd9 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/clipseg/weights/rd64-uni.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:13845f6cee4d54ca46f62ee19dd354822094a26e0efccc64e606be93d6a7e26f +size 4306645 diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/film_interpolation/film_inference.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/film_interpolation/film_inference.py new file mode 100644 index 0000000000000000000000000000000000000000..0d4106d0ae285078daf289202165512db391d86d --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/film_interpolation/film_inference.py @@ -0,0 +1,126 @@ +import os +import gc +from glob import glob +import bisect +from tqdm import tqdm +import torch +import numpy as np +import cv2 +from .film_util import load_image +import time +from types import SimpleNamespace +from modules.shared import cmd_opts +import warnings +warnings.filterwarnings("ignore") + +def run_film_interp_infer( + model_path = None, + input_folder = None, + save_folder = None, + inter_frames = None): + + args = SimpleNamespace() + args.model_path = model_path + args.input_folder = input_folder + args.save_folder = save_folder + args.inter_frames = inter_frames + + # Check if the folder exists + if not os.path.exists(args.input_folder): + print(f"Error: Folder '{args.input_folder}' does not exist.") + return + # Check if the folder contains any PNG or JPEG images + if not any([f.endswith(".png") or f.endswith(".jpg") for f in os.listdir(args.input_folder)]): + print(f"Error: Folder '{args.input_folder}' does not contain any PNG or JPEG images.") + return + + start_time = time.time() # Timer START + + # Sort Jpg/Png images by name + image_paths = sorted(glob(os.path.join(args.input_folder, "*.[jJ][pP][gG]")) + glob(os.path.join(args.input_folder, "*.[pP][nN][gG]"))) + print(f"Total frames to FILM-interpolate: {len(image_paths)}. 
Total frame-pairs: {len(image_paths)-1}.") + + model = torch.jit.load(args.model_path, map_location='cpu') + # half precision the model if user didn't pass --no-half/ --precision full cmd arg flags + if not cmd_opts.no_half: + model = model.half() + model = model.cuda() + model.eval() + + for i in tqdm(range(len(image_paths) - 1), desc='FILM progress'): + img1 = image_paths[i] + img2 = image_paths[i+1] + img_batch_1, crop_region_1 = load_image(img1) + img_batch_2, crop_region_2 = load_image(img2) + img_batch_1 = torch.from_numpy(img_batch_1).permute(0, 3, 1, 2) + img_batch_2 = torch.from_numpy(img_batch_2).permute(0, 3, 1, 2) + + save_path = os.path.join(args.save_folder, f"{i}_to_{i+1}.jpg") + + results = [ + img_batch_1, + img_batch_2 + ] + + idxes = [0, inter_frames + 1] + remains = list(range(1, inter_frames + 1)) + + splits = torch.linspace(0, 1, inter_frames + 2) + + inner_loop_progress = tqdm(range(len(remains)), leave=False, disable=True) + for _ in inner_loop_progress: + starts = splits[idxes[:-1]] + ends = splits[idxes[1:]] + distances = ((splits[None, remains] - starts[:, None]) / (ends[:, None] - starts[:, None]) - .5).abs() + matrix = torch.argmin(distances).item() + start_i, step = np.unravel_index(matrix, distances.shape) + end_i = start_i + 1 + + x0 = results[start_i] + x1 = results[end_i] + + x0 = x0.half() + x1 = x1.half() + x0 = x0.cuda() + x1 = x1.cuda() + + dt = x0.new_full((1, 1), (splits[remains[step]] - splits[idxes[start_i]])) / (splits[idxes[end_i]] - splits[idxes[start_i]]) + + with torch.no_grad(): + prediction = model(x0, x1, dt) + insert_position = bisect.bisect_left(idxes, remains[step]) + idxes.insert(insert_position, remains[step]) + results.insert(insert_position, prediction.clamp(0, 1).cpu().float()) + inner_loop_progress.update(1) + del remains[step] + inner_loop_progress.close() + # create output folder for interpolated imgs to live in + os.makedirs(args.save_folder, exist_ok=True) + + y1, x1, y2, x2 = crop_region_1 + frames = [(tensor[0] * 255).byte().flip(0).permute(1, 2, 0).numpy()[y1:y2, x1:x2].copy() for tensor in results] + + existing_files = os.listdir(args.save_folder) + if len(existing_files) > 0: + existing_numbers = [int(file.split("_")[1].split(".")[0]) for file in existing_files] + next_number = max(existing_numbers) + 1 + else: + next_number = 0 + + outer_loop_count = i + for i, frame in enumerate(frames): + frame_path = os.path.join(args.save_folder, f"frame_{next_number:09d}.png") + # last pair, save all frames including the last one + if len(image_paths) - 2 == outer_loop_count: + cv2.imwrite(frame_path, frame) + else: # not last pair, don't save the last frame + if not i == len(frames) - 1: + cv2.imwrite(frame_path, frame) + next_number += 1 + + # remove FILM model from memory + if model is not None: + del model + torch.cuda.empty_cache() + gc.collect() + print(f"Interpolation \033[0;32mdone\033[0m in {time.time()-start_time:.2f} seconds!") \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/film_interpolation/film_util.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/film_interpolation/film_util.py new file mode 100644 index 0000000000000000000000000000000000000000..e510758e53ced0af433fc14f63bf9b504e256544 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/film_interpolation/film_util.py @@ -0,0 +1,161 @@ +"""Various utilities used in the film_net frame interpolator model.""" +from typing import List, Optional + +import cv2 
+import numpy as np +import torch +from torch import nn +from torch.nn import functional as F + + +def pad_batch(batch, align): + height, width = batch.shape[1:3] + height_to_pad = (align - height % align) if height % align != 0 else 0 + width_to_pad = (align - width % align) if width % align != 0 else 0 + + crop_region = [height_to_pad >> 1, width_to_pad >> 1, height + (height_to_pad >> 1), width + (width_to_pad >> 1)] + batch = np.pad(batch, ((0, 0), (height_to_pad >> 1, height_to_pad - (height_to_pad >> 1)), + (width_to_pad >> 1, width_to_pad - (width_to_pad >> 1)), (0, 0)), mode='constant') + return batch, crop_region + + +def load_image(path, align=64): + image = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB).astype(np.float32) / np.float32(255) + image_batch, crop_region = pad_batch(np.expand_dims(image, axis=0), align) + return image_batch, crop_region + + +def build_image_pyramid(image: torch.Tensor, pyramid_levels: int = 3) -> List[torch.Tensor]: + """Builds an image pyramid from a given image. + + The original image is included in the pyramid and the rest are generated by + successively halving the resolution. + + Args: + image: the input image. + options: film_net options object + + Returns: + A list of images starting from the finest with options.pyramid_levels items + """ + + pyramid = [] + for i in range(pyramid_levels): + pyramid.append(image) + if i < pyramid_levels - 1: + image = F.avg_pool2d(image, 2, 2) + return pyramid + + +def warp(image: torch.Tensor, flow: torch.Tensor) -> torch.Tensor: + """Backward warps the image using the given flow. + + Specifically, the output pixel in batch b, at position x, y will be computed + as follows: + (flowed_y, flowed_x) = (y+flow[b, y, x, 1], x+flow[b, y, x, 0]) + output[b, y, x] = bilinear_lookup(image, b, flowed_y, flowed_x) + + Note that the flow vectors are expected as [x, y], e.g. x in position 0 and + y in position 1. + + Args: + image: An image with shape BxHxWxC. + flow: A flow with shape BxHxWx2, with the two channels denoting the relative + offset in order: (dx, dy). + Returns: + A warped image. + """ + flow = -flow.flip(1) + + dtype = flow.dtype + device = flow.device + + # warped = tfa_image.dense_image_warp(image, flow) + # Same as above but with pytorch + ls1 = 1 - 1 / flow.shape[3] + ls2 = 1 - 1 / flow.shape[2] + + normalized_flow2 = flow.permute(0, 2, 3, 1) / torch.tensor( + [flow.shape[2] * .5, flow.shape[3] * .5], dtype=dtype, device=device)[None, None, None] + normalized_flow2 = torch.stack([ + torch.linspace(-ls1, ls1, flow.shape[3], dtype=dtype, device=device)[None, None, :] - normalized_flow2[..., 1], + torch.linspace(-ls2, ls2, flow.shape[2], dtype=dtype, device=device)[None, :, None] - normalized_flow2[..., 0], + ], dim=3) + + warped = F.grid_sample(image, normalized_flow2, + mode='bilinear', padding_mode='border', align_corners=False) + return warped.reshape(image.shape) + + +def multiply_pyramid(pyramid: List[torch.Tensor], + scalar: torch.Tensor) -> List[torch.Tensor]: + """Multiplies all image batches in the pyramid by a batch of scalars. + + Args: + pyramid: Pyramid of image batches. + scalar: Batch of scalars. + + Returns: + An image pyramid with all images multiplied by the scalar. + """ + # To multiply each image with its corresponding scalar, we first transpose + # the batch of images from BxHxWxC-format to CxHxWxB. This can then be + # multiplied with a batch of scalars, then we transpose back to the standard + # BxHxWxC form. 
+ return [image * scalar for image in pyramid] + + +def flow_pyramid_synthesis( + residual_pyramid: List[torch.Tensor]) -> List[torch.Tensor]: + """Converts a residual flow pyramid into a flow pyramid.""" + flow = residual_pyramid[-1] + flow_pyramid: List[torch.Tensor] = [flow] + for residual_flow in residual_pyramid[:-1][::-1]: + level_size = residual_flow.shape[2:4] + flow = F.interpolate(2 * flow, size=level_size, mode='bilinear') + flow = residual_flow + flow + flow_pyramid.insert(0, flow) + return flow_pyramid + + +def pyramid_warp(feature_pyramid: List[torch.Tensor], + flow_pyramid: List[torch.Tensor]) -> List[torch.Tensor]: + """Warps the feature pyramid using the flow pyramid. + + Args: + feature_pyramid: feature pyramid starting from the finest level. + flow_pyramid: flow fields, starting from the finest level. + + Returns: + Reverse warped feature pyramid. + """ + warped_feature_pyramid = [] + for features, flow in zip(feature_pyramid, flow_pyramid): + warped_feature_pyramid.append(warp(features, flow)) + return warped_feature_pyramid + + +def concatenate_pyramids(pyramid1: List[torch.Tensor], + pyramid2: List[torch.Tensor]) -> List[torch.Tensor]: + """Concatenates each pyramid level together in the channel dimension.""" + result = [] + for features1, features2 in zip(pyramid1, pyramid2): + result.append(torch.cat([features1, features2], dim=1)) + return result + + +def conv(in_channels, out_channels, size, activation: Optional[str] = 'relu'): + # Since PyTorch doesn't have an in-built activation in Conv2d, we use a + # Sequential layer to combine Conv2d and Leaky ReLU in one module. + _conv = nn.Conv2d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=size, + padding='same') + if activation is None: + return _conv + assert activation == 'relu' + return nn.Sequential( + _conv, + nn.LeakyReLU(.2) + ) diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/infer.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/infer.py new file mode 100644 index 0000000000000000000000000000000000000000..4474e972a7c2ad25e1078b6549805dc26164fdbb --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/infer.py @@ -0,0 +1,165 @@ +import glob +import os + +import numpy as np +import torch +import torch.nn as nn +from PIL import Image +from torchvision import transforms +from tqdm import tqdm + +import model_io +import utils +from adabins import UnetAdaptiveBins + + +def _is_pil_image(img): + return isinstance(img, Image.Image) + + +def _is_numpy_image(img): + return isinstance(img, np.ndarray) and (img.ndim in {2, 3}) + + +class ToTensor(object): + def __init__(self): + self.normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) + + def __call__(self, image, target_size=(640, 480)): + # image = image.resize(target_size) + image = self.to_tensor(image) + image = self.normalize(image) + return image + + def to_tensor(self, pic): + if not (_is_pil_image(pic) or _is_numpy_image(pic)): + raise TypeError( + 'pic should be PIL Image or ndarray. 
Got {}'.format(type(pic))) + + if isinstance(pic, np.ndarray): + img = torch.from_numpy(pic.transpose((2, 0, 1))) + return img + + # handle PIL Image + if pic.mode == 'I': + img = torch.from_numpy(np.array(pic, np.int32, copy=False)) + elif pic.mode == 'I;16': + img = torch.from_numpy(np.array(pic, np.int16, copy=False)) + else: + img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes())) + # PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK + if pic.mode == 'YCbCr': + nchannel = 3 + elif pic.mode == 'I;16': + nchannel = 1 + else: + nchannel = len(pic.mode) + img = img.view(pic.size[1], pic.size[0], nchannel) + + img = img.transpose(0, 1).transpose(0, 2).contiguous() + if isinstance(img, torch.ByteTensor): + return img.float() + else: + return img + + +class InferenceHelper: + def __init__(self, models_path, dataset='nyu', device='cuda:0'): + self.toTensor = ToTensor() + self.device = device + if dataset == 'nyu': + self.min_depth = 1e-3 + self.max_depth = 10 + self.saving_factor = 1000 # used to save in 16 bit + model = UnetAdaptiveBins.build(n_bins=256, min_val=self.min_depth, max_val=self.max_depth) + pretrained_path = os.path.join(models_path, "AdaBins_nyu.pt") + elif dataset == 'kitti': + self.min_depth = 1e-3 + self.max_depth = 80 + self.saving_factor = 256 + model = UnetAdaptiveBins.build(n_bins=256, min_val=self.min_depth, max_val=self.max_depth) + pretrained_path = os.path.join(models_path, "AdaBins_kitti.pt") + else: + raise ValueError("dataset can be either 'nyu' or 'kitti' but got {}".format(dataset)) + + model, _, _ = model_io.load_checkpoint(pretrained_path, model) + model.eval() + self.model = model.to(self.device) + + @torch.no_grad() + def predict_pil(self, pil_image, visualized=False): + # pil_image = pil_image.resize((640, 480)) + img = np.asarray(pil_image) / 255. + + img = self.toTensor(img).unsqueeze(0).float().to(self.device) + bin_centers, pred = self.predict(img) + + if visualized: + viz = utils.colorize(torch.from_numpy(pred).unsqueeze(0), vmin=None, vmax=None, cmap='magma') + # pred = np.asarray(pred*1000, dtype='uint16') + viz = Image.fromarray(viz) + return bin_centers, pred, viz + return bin_centers, pred + + @torch.no_grad() + def predict(self, image): + bins, pred = self.model(image) + pred = np.clip(pred.cpu().numpy(), self.min_depth, self.max_depth) + + # Flip + image = torch.Tensor(np.array(image.cpu().numpy())[..., ::-1].copy()).to(self.device) + pred_lr = self.model(image)[-1] + pred_lr = np.clip(pred_lr.cpu().numpy()[..., ::-1], self.min_depth, self.max_depth) + + # Take average of original and mirror + final = 0.5 * (pred + pred_lr) + final = nn.functional.interpolate(torch.Tensor(final), image.shape[-2:], + mode='bilinear', align_corners=True).cpu().numpy() + + final[final < self.min_depth] = self.min_depth + final[final > self.max_depth] = self.max_depth + final[np.isinf(final)] = self.max_depth + final[np.isnan(final)] = self.min_depth + + centers = 0.5 * (bins[:, 1:] + bins[:, :-1]) + centers = centers.cpu().squeeze().numpy() + centers = centers[centers > self.min_depth] + centers = centers[centers < self.max_depth] + + return centers, final + + @torch.no_grad() + def predict_dir(self, test_dir, out_dir): + os.makedirs(out_dir, exist_ok=True) + transform = ToTensor() + all_files = glob.glob(os.path.join(test_dir, "*")) + self.model.eval() + for f in tqdm(all_files): + image = np.asarray(Image.open(f), dtype='float32') / 255. 
+ image = transform(image).unsqueeze(0).to(self.device) + + centers, final = self.predict(image) + # final = final.squeeze().cpu().numpy() + + final = (final * self.saving_factor).astype('uint16') + basename = os.path.basename(f).split('.')[0] + save_path = os.path.join(out_dir, basename + ".png") + + Image.fromarray(final.squeeze()).save(save_path) + + def to(self, device): + self.device = device + self.model.to(device) + + +if __name__ == '__main__': + import matplotlib.pyplot as plt + from time import time + + img = Image.open("test_imgs/classroom__rgb_00283.jpg") + start = time() + inferHelper = InferenceHelper() + centers, pred = inferHelper.predict_pil(img) + print(f"took :{time() - start}s") + plt.imshow(pred.squeeze(), cmap='magma_r') + plt.show() diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/leres/lib/Resnet.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/leres/lib/Resnet.py new file mode 100644 index 0000000000000000000000000000000000000000..f12c9975c1aa05401269be3ca3dbaa56bde55581 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/leres/lib/Resnet.py @@ -0,0 +1,199 @@ +import torch.nn as nn +import torch.nn as NN + +__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', + 'resnet152'] + + +model_urls = { + 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', + 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', + 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', + 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', + 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', +} + + +def conv3x3(in_planes, out_planes, stride=1): + """3x3 convolution with padding""" + return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, + padding=1, bias=False) + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, downsample=None): + super(BasicBlock, self).__init__() + self.conv1 = conv3x3(inplanes, planes, stride) + self.bn1 = NN.BatchNorm2d(planes) #NN.BatchNorm2d + self.relu = nn.ReLU(inplace=True) + self.conv2 = conv3x3(planes, planes) + self.bn2 = NN.BatchNorm2d(planes) #NN.BatchNorm2d + self.downsample = downsample + self.stride = stride + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None): + super(Bottleneck, self).__init__() + self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) + self.bn1 = NN.BatchNorm2d(planes) #NN.BatchNorm2d + self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, + padding=1, bias=False) + self.bn2 = NN.BatchNorm2d(planes) #NN.BatchNorm2d + self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False) + self.bn3 = NN.BatchNorm2d(planes * self.expansion) #NN.BatchNorm2d + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not 
None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class ResNet(nn.Module): + + def __init__(self, block, layers, num_classes=1000): + self.inplanes = 64 + super(ResNet, self).__init__() + self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, + bias=False) + self.bn1 = NN.BatchNorm2d(64) #NN.BatchNorm2d + self.relu = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.layer1 = self._make_layer(block, 64, layers[0]) + self.layer2 = self._make_layer(block, 128, layers[1], stride=2) + self.layer3 = self._make_layer(block, 256, layers[2], stride=2) + self.layer4 = self._make_layer(block, 512, layers[3], stride=2) + #self.avgpool = nn.AvgPool2d(7, stride=1) + #self.fc = nn.Linear(512 * block.expansion, num_classes) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + def _make_layer(self, block, planes, blocks, stride=1): + downsample = None + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + nn.Conv2d(self.inplanes, planes * block.expansion, + kernel_size=1, stride=stride, bias=False), + NN.BatchNorm2d(planes * block.expansion), #NN.BatchNorm2d + ) + + layers = [] + layers.append(block(self.inplanes, planes, stride, downsample)) + self.inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append(block(self.inplanes, planes)) + + return nn.Sequential(*layers) + + def forward(self, x): + features = [] + + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + x = self.maxpool(x) + + x = self.layer1(x) + features.append(x) + x = self.layer2(x) + features.append(x) + x = self.layer3(x) + features.append(x) + x = self.layer4(x) + features.append(x) + + return features + + +def resnet18(pretrained=True, **kwargs): + """Constructs a ResNet-18 model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs) + return model + + +def resnet34(pretrained=True, **kwargs): + """Constructs a ResNet-34 model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs) + return model + + +def resnet50(pretrained=True, **kwargs): + """Constructs a ResNet-50 model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs) + + return model + + +def resnet101(pretrained=True, **kwargs): + """Constructs a ResNet-101 model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs) + + return model + + +def resnet152(pretrained=True, **kwargs): + """Constructs a ResNet-152 model. 
+ Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs) + return model diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/leres/lib/Resnext_torch.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/leres/lib/Resnext_torch.py new file mode 100644 index 0000000000000000000000000000000000000000..e5ce4c50a4975acf02079488e42cfd9d686572d3 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/leres/lib/Resnext_torch.py @@ -0,0 +1,247 @@ +#!/usr/bin/env python +# coding: utf-8 +import torch.nn as nn + +try: + from urllib import urlretrieve +except ImportError: + from urllib.request import urlretrieve + +__all__ = ['resnext101_32x8d'] + + +model_urls = { + 'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth', + 'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth', +} + + +def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1): + """3x3 convolution with padding""" + return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, + padding=dilation, groups=groups, bias=False, dilation=dilation) + + +def conv1x1(in_planes, out_planes, stride=1): + """1x1 convolution""" + return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, + base_width=64, dilation=1, norm_layer=None): + super(BasicBlock, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + if groups != 1 or base_width != 64: + raise ValueError('BasicBlock only supports groups=1 and base_width=64') + if dilation > 1: + raise NotImplementedError("Dilation > 1 not supported in BasicBlock") + # Both self.conv1 and self.downsample layers downsample the input when stride != 1 + self.conv1 = conv3x3(inplanes, planes, stride) + self.bn1 = norm_layer(planes) + self.relu = nn.ReLU(inplace=True) + self.conv2 = conv3x3(planes, planes) + self.bn2 = norm_layer(planes) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + + +class Bottleneck(nn.Module): + # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2) + # while original implementation places the stride at the first 1x1 convolution(self.conv1) + # according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385. + # This variant is also known as ResNet V1.5 and improves accuracy according to + # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch. 
+ + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, + base_width=64, dilation=1, norm_layer=None): + super(Bottleneck, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + width = int(planes * (base_width / 64.)) * groups + # Both self.conv2 and self.downsample layers downsample the input when stride != 1 + self.conv1 = conv1x1(inplanes, width) + self.bn1 = norm_layer(width) + self.conv2 = conv3x3(width, width, stride, groups, dilation) + self.bn2 = norm_layer(width) + self.conv3 = conv1x1(width, planes * self.expansion) + self.bn3 = norm_layer(planes * self.expansion) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + + +class ResNet(nn.Module): + + def __init__(self, block, layers, num_classes=1000, zero_init_residual=False, + groups=1, width_per_group=64, replace_stride_with_dilation=None, + norm_layer=None): + super(ResNet, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + self._norm_layer = norm_layer + + self.inplanes = 64 + self.dilation = 1 + if replace_stride_with_dilation is None: + # each element in the tuple indicates if we should replace + # the 2x2 stride with a dilated convolution instead + replace_stride_with_dilation = [False, False, False] + if len(replace_stride_with_dilation) != 3: + raise ValueError("replace_stride_with_dilation should be None " + "or a 3-element tuple, got {}".format(replace_stride_with_dilation)) + self.groups = groups + self.base_width = width_per_group + self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, + bias=False) + self.bn1 = norm_layer(self.inplanes) + self.relu = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.layer1 = self._make_layer(block, 64, layers[0]) + self.layer2 = self._make_layer(block, 128, layers[1], stride=2, + dilate=replace_stride_with_dilation[0]) + self.layer3 = self._make_layer(block, 256, layers[2], stride=2, + dilate=replace_stride_with_dilation[1]) + self.layer4 = self._make_layer(block, 512, layers[3], stride=2, + dilate=replace_stride_with_dilation[2]) + #self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + #self.fc = nn.Linear(512 * block.expansion, num_classes) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + # Zero-initialize the last BN in each residual branch, + # so that the residual branch starts with zeros, and each residual block behaves like an identity. 
+ # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677 + if zero_init_residual: + for m in self.modules(): + if isinstance(m, Bottleneck): + nn.init.constant_(m.bn3.weight, 0) + elif isinstance(m, BasicBlock): + nn.init.constant_(m.bn2.weight, 0) + + def _make_layer(self, block, planes, blocks, stride=1, dilate=False): + norm_layer = self._norm_layer + downsample = None + previous_dilation = self.dilation + if dilate: + self.dilation *= stride + stride = 1 + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + conv1x1(self.inplanes, planes * block.expansion, stride), + norm_layer(planes * block.expansion), + ) + + layers = [] + layers.append(block(self.inplanes, planes, stride, downsample, self.groups, + self.base_width, previous_dilation, norm_layer)) + self.inplanes = planes * block.expansion + for _ in range(1, blocks): + layers.append(block(self.inplanes, planes, groups=self.groups, + base_width=self.base_width, dilation=self.dilation, + norm_layer=norm_layer)) + + return nn.Sequential(*layers) + + def _forward_impl(self, x): + # See note [TorchScript super()] + features = [] + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + x = self.maxpool(x) + + x = self.layer1(x) + features.append(x) + + x = self.layer2(x) + features.append(x) + + x = self.layer3(x) + features.append(x) + + x = self.layer4(x) + features.append(x) + + #x = self.avgpool(x) + #x = torch.flatten(x, 1) + #x = self.fc(x) + + return features + + def forward(self, x): + return self._forward_impl(x) + + + +def resnext101_32x8d(pretrained=True, **kwargs): + """Constructs a ResNet-152 model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + kwargs['groups'] = 32 + kwargs['width_per_group'] = 8 + + model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs) + return model + + + +if __name__ == '__main__': + import torch + model = resnext101_32x8d(True).cuda() + + rgb = torch.rand((2, 3, 256, 256)).cuda() + out = model(rgb) + print(len(out)) + diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/leres/lib/__init__.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/leres/lib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/leres/lib/__init__.py @@ -0,0 +1 @@ + diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/leres/lib/multi_depth_model_woauxi.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/leres/lib/multi_depth_model_woauxi.py new file mode 100644 index 0000000000000000000000000000000000000000..90a384bdd725dcede31fb599eb4e575ff404a8bd --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/leres/lib/multi_depth_model_woauxi.py @@ -0,0 +1,34 @@ +import torch +import torch.nn as nn + +from leres.lib import network_auxi as network +from leres.lib.net_tools import get_func + +class RelDepthModel(nn.Module): + def __init__(self, backbone='resnet50'): + super(RelDepthModel, self).__init__() + if backbone == 'resnet50': + encoder = 'resnet50_stride32' + elif backbone == 'resnext101': + encoder = 'resnext101_stride32x8d' + self.depth_model = DepthModel(encoder) + + def inference(self, rgb): + with torch.no_grad(): + input = rgb.cuda() + depth = self.depth_model(input) + pred_depth_out = depth - depth.min() + 0.01 + return pred_depth_out + + +class 
DepthModel(nn.Module): + def __init__(self, encoder): + super(DepthModel, self).__init__() + backbone = network.__name__.split('.')[-1] + '.' + encoder + self.encoder_modules = get_func(backbone)() + self.decoder_modules = network.Decoder() + + def forward(self, x): + lateral_out = self.encoder_modules(x) + out_logit = self.decoder_modules(lateral_out) + return out_logit \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/leres/lib/net_tools.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/leres/lib/net_tools.py new file mode 100644 index 0000000000000000000000000000000000000000..870455d27a51eff01ec412a298a01360f0be8f59 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/leres/lib/net_tools.py @@ -0,0 +1,53 @@ +import importlib +import torch +import os +from collections import OrderedDict + + +def get_func(func_name): + """Helper to return a function object by name. func_name must identify a + function in this module or the path to a function relative to the base + 'modeling' module. + """ + if func_name == '': + return None + try: + parts = func_name.split('.') + # Refers to a function in this module + if len(parts) == 1: + return globals()[parts[0]] + # Otherwise, assume we're referencing a module under modeling + module_name = 'leres.lib.' + '.'.join(parts[:-1]) + module = importlib.import_module(module_name) + return getattr(module, parts[-1]) + except Exception: + print('Failed to find function: %s' % func_name) + raise + +def load_ckpt(args, depth_model, shift_model, focal_model): + """ + Load checkpoint. + """ + if os.path.isfile(args): + print("loading LeReS checkpoint from %s" % args) + checkpoint = torch.load(args) + if shift_model is not None: + shift_model.load_state_dict(strip_prefix_if_present(checkpoint['shift_model'], 'module.'), + strict=True) + if focal_model is not None: + focal_model.load_state_dict(strip_prefix_if_present(checkpoint['focal_model'], 'module.'), + strict=True) + depth_model.load_state_dict(strip_prefix_if_present(checkpoint['depth_model'], "module."), + strict=True) + del checkpoint + torch.cuda.empty_cache() + + +def strip_prefix_if_present(state_dict, prefix): + keys = sorted(state_dict.keys()) + if not all(key.startswith(prefix) for key in keys): + return state_dict + stripped_state_dict = OrderedDict() + for key, value in state_dict.items(): + stripped_state_dict[key.replace(prefix, "")] = value + return stripped_state_dict \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/leres/lib/network_auxi.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/leres/lib/network_auxi.py new file mode 100644 index 0000000000000000000000000000000000000000..638296a775ffc431654758f954d9c5e75c9e1882 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/leres/lib/network_auxi.py @@ -0,0 +1,417 @@ +import torch +import torch.nn as nn +import torch.nn.init as init + +from leres.lib import Resnet, Resnext_torch + + +def resnet50_stride32(): + return DepthNet(backbone='resnet', depth=50, upfactors=[2, 2, 2, 2]) + +def resnext101_stride32x8d(): + return DepthNet(backbone='resnext101_32x8d', depth=101, upfactors=[2, 2, 2, 2]) + + +class Decoder(nn.Module): + def __init__(self): + super(Decoder, self).__init__() + self.inchannels = [256, 512, 1024, 2048] + self.midchannels = [256, 256, 256, 512] + self.upfactors = [2,2,2,2] + self.outchannels = 1 + + self.conv = 
FTB(inchannels=self.inchannels[3], midchannels=self.midchannels[3]) + self.conv1 = nn.Conv2d(in_channels=self.midchannels[3], out_channels=self.midchannels[2], kernel_size=3, padding=1, stride=1, bias=True) + self.upsample = nn.Upsample(scale_factor=self.upfactors[3], mode='bilinear', align_corners=True) + + self.ffm2 = FFM(inchannels=self.inchannels[2], midchannels=self.midchannels[2], outchannels = self.midchannels[2], upfactor=self.upfactors[2]) + self.ffm1 = FFM(inchannels=self.inchannels[1], midchannels=self.midchannels[1], outchannels = self.midchannels[1], upfactor=self.upfactors[1]) + self.ffm0 = FFM(inchannels=self.inchannels[0], midchannels=self.midchannels[0], outchannels = self.midchannels[0], upfactor=self.upfactors[0]) + + self.outconv = AO(inchannels=self.midchannels[0], outchannels=self.outchannels, upfactor=2) + self._init_params() + + def _init_params(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + init.normal_(m.weight, std=0.01) + if m.bias is not None: + init.constant_(m.bias, 0) + elif isinstance(m, nn.ConvTranspose2d): + init.normal_(m.weight, std=0.01) + if m.bias is not None: + init.constant_(m.bias, 0) + elif isinstance(m, nn.BatchNorm2d): #NN.BatchNorm2d + init.constant_(m.weight, 1) + init.constant_(m.bias, 0) + elif isinstance(m, nn.Linear): + init.normal_(m.weight, std=0.01) + if m.bias is not None: + init.constant_(m.bias, 0) + + def forward(self, features): + x_32x = self.conv(features[3]) # 1/32 + x_32 = self.conv1(x_32x) + x_16 = self.upsample(x_32) # 1/16 + + x_8 = self.ffm2(features[2], x_16) # 1/8 + x_4 = self.ffm1(features[1], x_8) # 1/4 + x_2 = self.ffm0(features[0], x_4) # 1/2 + #----------------------------------------- + x = self.outconv(x_2) # original size + return x + +class DepthNet(nn.Module): + __factory = { + 18: Resnet.resnet18, + 34: Resnet.resnet34, + 50: Resnet.resnet50, + 101: Resnet.resnet101, + 152: Resnet.resnet152 + } + def __init__(self, + backbone='resnet', + depth=50, + upfactors=[2, 2, 2, 2]): + super(DepthNet, self).__init__() + self.backbone = backbone + self.depth = depth + self.pretrained = False + self.inchannels = [256, 512, 1024, 2048] + self.midchannels = [256, 256, 256, 512] + self.upfactors = upfactors + self.outchannels = 1 + + # Build model + if self.backbone == 'resnet': + if self.depth not in DepthNet.__factory: + raise KeyError("Unsupported depth:", self.depth) + self.encoder = DepthNet.__factory[depth](pretrained=self.pretrained) + elif self.backbone == 'resnext101_32x8d': + self.encoder = Resnext_torch.resnext101_32x8d(pretrained=self.pretrained) + else: + self.encoder = Resnext_torch.resnext101(pretrained=self.pretrained) + + def forward(self, x): + x = self.encoder(x) # 1/32, 1/16, 1/8, 1/4 + return x + + +class FTB(nn.Module): + def __init__(self, inchannels, midchannels=512): + super(FTB, self).__init__() + self.in1 = inchannels + self.mid = midchannels + self.conv1 = nn.Conv2d(in_channels=self.in1, out_channels=self.mid, kernel_size=3, padding=1, stride=1, + bias=True) + # NN.BatchNorm2d + self.conv_branch = nn.Sequential(nn.ReLU(inplace=True), \ + nn.Conv2d(in_channels=self.mid, out_channels=self.mid, kernel_size=3, + padding=1, stride=1, bias=True), \ + nn.BatchNorm2d(num_features=self.mid), \ + nn.ReLU(inplace=True), \ + nn.Conv2d(in_channels=self.mid, out_channels=self.mid, kernel_size=3, + padding=1, stride=1, bias=True)) + self.relu = nn.ReLU(inplace=True) + + self.init_params() + + def forward(self, x): + x = self.conv1(x) + x = x + self.conv_branch(x) + x = self.relu(x) + + return 
x + + def init_params(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + init.normal_(m.weight, std=0.01) + if m.bias is not None: + init.constant_(m.bias, 0) + elif isinstance(m, nn.ConvTranspose2d): + # init.kaiming_normal_(m.weight, mode='fan_out') + init.normal_(m.weight, std=0.01) + # init.xavier_normal_(m.weight) + if m.bias is not None: + init.constant_(m.bias, 0) + elif isinstance(m, nn.BatchNorm2d): # NN.BatchNorm2d + init.constant_(m.weight, 1) + init.constant_(m.bias, 0) + elif isinstance(m, nn.Linear): + init.normal_(m.weight, std=0.01) + if m.bias is not None: + init.constant_(m.bias, 0) + + +class ATA(nn.Module): + def __init__(self, inchannels, reduction=8): + super(ATA, self).__init__() + self.inchannels = inchannels + self.avg_pool = nn.AdaptiveAvgPool2d(1) + self.fc = nn.Sequential(nn.Linear(self.inchannels * 2, self.inchannels // reduction), + nn.ReLU(inplace=True), + nn.Linear(self.inchannels // reduction, self.inchannels), + nn.Sigmoid()) + self.init_params() + + def forward(self, low_x, high_x): + n, c, _, _ = low_x.size() + x = torch.cat([low_x, high_x], 1) + x = self.avg_pool(x) + x = x.view(n, -1) + x = self.fc(x).view(n, c, 1, 1) + x = low_x * x + high_x + + return x + + def init_params(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + # init.kaiming_normal_(m.weight, mode='fan_out') + # init.normal(m.weight, std=0.01) + init.xavier_normal_(m.weight) + if m.bias is not None: + init.constant_(m.bias, 0) + elif isinstance(m, nn.ConvTranspose2d): + # init.kaiming_normal_(m.weight, mode='fan_out') + # init.normal_(m.weight, std=0.01) + init.xavier_normal_(m.weight) + if m.bias is not None: + init.constant_(m.bias, 0) + elif isinstance(m, nn.BatchNorm2d): # NN.BatchNorm2d + init.constant_(m.weight, 1) + init.constant_(m.bias, 0) + elif isinstance(m, nn.Linear): + init.normal_(m.weight, std=0.01) + if m.bias is not None: + init.constant_(m.bias, 0) + + +class FFM(nn.Module): + def __init__(self, inchannels, midchannels, outchannels, upfactor=2): + super(FFM, self).__init__() + self.inchannels = inchannels + self.midchannels = midchannels + self.outchannels = outchannels + self.upfactor = upfactor + + self.ftb1 = FTB(inchannels=self.inchannels, midchannels=self.midchannels) + # self.ata = ATA(inchannels = self.midchannels) + self.ftb2 = FTB(inchannels=self.midchannels, midchannels=self.outchannels) + + self.upsample = nn.Upsample(scale_factor=self.upfactor, mode='bilinear', align_corners=True) + + self.init_params() + + def forward(self, low_x, high_x): + x = self.ftb1(low_x) + x = x + high_x + x = self.ftb2(x) + x = self.upsample(x) + + return x + + def init_params(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + # init.kaiming_normal_(m.weight, mode='fan_out') + init.normal_(m.weight, std=0.01) + # init.xavier_normal_(m.weight) + if m.bias is not None: + init.constant_(m.bias, 0) + elif isinstance(m, nn.ConvTranspose2d): + # init.kaiming_normal_(m.weight, mode='fan_out') + init.normal_(m.weight, std=0.01) + # init.xavier_normal_(m.weight) + if m.bias is not None: + init.constant_(m.bias, 0) + elif isinstance(m, nn.BatchNorm2d): # NN.Batchnorm2d + init.constant_(m.weight, 1) + init.constant_(m.bias, 0) + elif isinstance(m, nn.Linear): + init.normal_(m.weight, std=0.01) + if m.bias is not None: + init.constant_(m.bias, 0) + + +class AO(nn.Module): + # Adaptive output module + def __init__(self, inchannels, outchannels, upfactor=2): + super(AO, self).__init__() + self.inchannels = inchannels + self.outchannels = 
outchannels + self.upfactor = upfactor + + self.adapt_conv = nn.Sequential( + nn.Conv2d(in_channels=self.inchannels, out_channels=self.inchannels // 2, kernel_size=3, padding=1, + stride=1, bias=True), \ + nn.BatchNorm2d(num_features=self.inchannels // 2), \ + nn.ReLU(inplace=True), \ + nn.Conv2d(in_channels=self.inchannels // 2, out_channels=self.outchannels, kernel_size=3, padding=1, + stride=1, bias=True), \ + nn.Upsample(scale_factor=self.upfactor, mode='bilinear', align_corners=True)) + + self.init_params() + + def forward(self, x): + x = self.adapt_conv(x) + return x + + def init_params(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + # init.kaiming_normal_(m.weight, mode='fan_out') + init.normal_(m.weight, std=0.01) + # init.xavier_normal_(m.weight) + if m.bias is not None: + init.constant_(m.bias, 0) + elif isinstance(m, nn.ConvTranspose2d): + # init.kaiming_normal_(m.weight, mode='fan_out') + init.normal_(m.weight, std=0.01) + # init.xavier_normal_(m.weight) + if m.bias is not None: + init.constant_(m.bias, 0) + elif isinstance(m, nn.BatchNorm2d): # NN.Batchnorm2d + init.constant_(m.weight, 1) + init.constant_(m.bias, 0) + elif isinstance(m, nn.Linear): + init.normal_(m.weight, std=0.01) + if m.bias is not None: + init.constant_(m.bias, 0) + + + +# ============================================================================================================== + + +class ResidualConv(nn.Module): + def __init__(self, inchannels): + super(ResidualConv, self).__init__() + # NN.BatchNorm2d + self.conv = nn.Sequential( + # nn.BatchNorm2d(num_features=inchannels), + nn.ReLU(inplace=False), + # nn.Conv2d(in_channels=inchannels, out_channels=inchannels, kernel_size=3, padding=1, stride=1, groups=inchannels,bias=True), + # nn.Conv2d(in_channels=inchannels, out_channels=inchannels, kernel_size=1, padding=0, stride=1, groups=1,bias=True) + nn.Conv2d(in_channels=inchannels, out_channels=inchannels / 2, kernel_size=3, padding=1, stride=1, + bias=False), + nn.BatchNorm2d(num_features=inchannels / 2), + nn.ReLU(inplace=False), + nn.Conv2d(in_channels=inchannels / 2, out_channels=inchannels, kernel_size=3, padding=1, stride=1, + bias=False) + ) + self.init_params() + + def forward(self, x): + x = self.conv(x) + x + return x + + def init_params(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + # init.kaiming_normal_(m.weight, mode='fan_out') + init.normal_(m.weight, std=0.01) + # init.xavier_normal_(m.weight) + if m.bias is not None: + init.constant_(m.bias, 0) + elif isinstance(m, nn.ConvTranspose2d): + # init.kaiming_normal_(m.weight, mode='fan_out') + init.normal_(m.weight, std=0.01) + # init.xavier_normal_(m.weight) + if m.bias is not None: + init.constant_(m.bias, 0) + elif isinstance(m, nn.BatchNorm2d): # NN.BatchNorm2d + init.constant_(m.weight, 1) + init.constant_(m.bias, 0) + elif isinstance(m, nn.Linear): + init.normal_(m.weight, std=0.01) + if m.bias is not None: + init.constant_(m.bias, 0) + + +class FeatureFusion(nn.Module): + def __init__(self, inchannels, outchannels): + super(FeatureFusion, self).__init__() + self.conv = ResidualConv(inchannels=inchannels) + # NN.BatchNorm2d + self.up = nn.Sequential(ResidualConv(inchannels=inchannels), + nn.ConvTranspose2d(in_channels=inchannels, out_channels=outchannels, kernel_size=3, + stride=2, padding=1, output_padding=1), + nn.BatchNorm2d(num_features=outchannels), + nn.ReLU(inplace=True)) + + def forward(self, lowfeat, highfeat): + return self.up(highfeat + self.conv(lowfeat)) + + def init_params(self): + for 
m in self.modules(): + if isinstance(m, nn.Conv2d): + # init.kaiming_normal_(m.weight, mode='fan_out') + init.normal_(m.weight, std=0.01) + # init.xavier_normal_(m.weight) + if m.bias is not None: + init.constant_(m.bias, 0) + elif isinstance(m, nn.ConvTranspose2d): + # init.kaiming_normal_(m.weight, mode='fan_out') + init.normal_(m.weight, std=0.01) + # init.xavier_normal_(m.weight) + if m.bias is not None: + init.constant_(m.bias, 0) + elif isinstance(m, nn.BatchNorm2d): # NN.BatchNorm2d + init.constant_(m.weight, 1) + init.constant_(m.bias, 0) + elif isinstance(m, nn.Linear): + init.normal_(m.weight, std=0.01) + if m.bias is not None: + init.constant_(m.bias, 0) + + +class SenceUnderstand(nn.Module): + def __init__(self, channels): + super(SenceUnderstand, self).__init__() + self.channels = channels + self.conv1 = nn.Sequential(nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=1), + nn.ReLU(inplace=True)) + self.pool = nn.AdaptiveAvgPool2d(8) + self.fc = nn.Sequential(nn.Linear(512 * 8 * 8, self.channels), + nn.ReLU(inplace=True)) + self.conv2 = nn.Sequential( + nn.Conv2d(in_channels=self.channels, out_channels=self.channels, kernel_size=1, padding=0), + nn.ReLU(inplace=True)) + self.initial_params() + + def forward(self, x): + n, c, h, w = x.size() + x = self.conv1(x) + x = self.pool(x) + x = x.view(n, -1) + x = self.fc(x) + x = x.view(n, self.channels, 1, 1) + x = self.conv2(x) + x = x.repeat(1, 1, h, w) + return x + + def initial_params(self, dev=0.01): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + # print torch.sum(m.weight) + m.weight.data.normal_(0, dev) + if m.bias is not None: + m.bias.data.fill_(0) + elif isinstance(m, nn.ConvTranspose2d): + # print torch.sum(m.weight) + m.weight.data.normal_(0, dev) + if m.bias is not None: + m.bias.data.fill_(0) + elif isinstance(m, nn.Linear): + m.weight.data.normal_(0, dev) + + +if __name__ == '__main__': + net = DepthNet(depth=50, pretrained=True) + print(net) + inputs = torch.ones(4,3,128,128) + out = net(inputs) + print(out.size()) + diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/leres/lib/spvcnn_classsification.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/leres/lib/spvcnn_classsification.py new file mode 100644 index 0000000000000000000000000000000000000000..f831544111aadc3ae5906eb0164f8596adc8c695 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/leres/lib/spvcnn_classsification.py @@ -0,0 +1,160 @@ +import torch.nn as nn +import torchsparse.nn as spnn +from torchsparse.point_tensor import PointTensor + +from lib.spvcnn_utils import * +__all__ = ['SPVCNN_CLASSIFICATION'] + + + +class BasicConvolutionBlock(nn.Module): + def __init__(self, inc, outc, ks=3, stride=1, dilation=1): + super().__init__() + self.net = nn.Sequential( + spnn.Conv3d(inc, + outc, + kernel_size=ks, + dilation=dilation, + stride=stride), + spnn.BatchNorm(outc), + spnn.ReLU(True)) + + def forward(self, x): + out = self.net(x) + return out + + +class BasicDeconvolutionBlock(nn.Module): + def __init__(self, inc, outc, ks=3, stride=1): + super().__init__() + self.net = nn.Sequential( + spnn.Conv3d(inc, + outc, + kernel_size=ks, + stride=stride, + transpose=True), + spnn.BatchNorm(outc), + spnn.ReLU(True)) + + def forward(self, x): + return self.net(x) + + +class ResidualBlock(nn.Module): + def __init__(self, inc, outc, ks=3, stride=1, dilation=1): + super().__init__() + self.net = nn.Sequential( + spnn.Conv3d(inc, + outc, + kernel_size=ks, + 
dilation=dilation, + stride=stride), spnn.BatchNorm(outc), + spnn.ReLU(True), + spnn.Conv3d(outc, + outc, + kernel_size=ks, + dilation=dilation, + stride=1), + spnn.BatchNorm(outc) + ) + + self.downsample = nn.Sequential() if (inc == outc and stride == 1) else \ + nn.Sequential( + spnn.Conv3d(inc, outc, kernel_size=1, dilation=1, stride=stride), + spnn.BatchNorm(outc) + ) + + self.relu = spnn.ReLU(True) + + def forward(self, x): + out = self.relu(self.net(x) + self.downsample(x)) + return out + + +class SPVCNN_CLASSIFICATION(nn.Module): + def __init__(self, **kwargs): + super().__init__() + + cr = kwargs.get('cr', 1.0) + cs = [32, 32, 64, 128, 256, 256, 128, 96, 96] + cs = [int(cr * x) for x in cs] + + if 'pres' in kwargs and 'vres' in kwargs: + self.pres = kwargs['pres'] + self.vres = kwargs['vres'] + + self.stem = nn.Sequential( + spnn.Conv3d(kwargs['input_channel'], cs[0], kernel_size=3, stride=1), + spnn.BatchNorm(cs[0]), + spnn.ReLU(True), + spnn.Conv3d(cs[0], cs[0], kernel_size=3, stride=1), + spnn.BatchNorm(cs[0]), + spnn.ReLU(True)) + + self.stage1 = nn.Sequential( + BasicConvolutionBlock(cs[0], cs[0], ks=2, stride=2, dilation=1), + ResidualBlock(cs[0], cs[1], ks=3, stride=1, dilation=1), + ResidualBlock(cs[1], cs[1], ks=3, stride=1, dilation=1), + ) + + self.stage2 = nn.Sequential( + BasicConvolutionBlock(cs[1], cs[1], ks=2, stride=2, dilation=1), + ResidualBlock(cs[1], cs[2], ks=3, stride=1, dilation=1), + ResidualBlock(cs[2], cs[2], ks=3, stride=1, dilation=1), + ) + + self.stage3 = nn.Sequential( + BasicConvolutionBlock(cs[2], cs[2], ks=2, stride=2, dilation=1), + ResidualBlock(cs[2], cs[3], ks=3, stride=1, dilation=1), + ResidualBlock(cs[3], cs[3], ks=3, stride=1, dilation=1), + ) + + self.stage4 = nn.Sequential( + BasicConvolutionBlock(cs[3], cs[3], ks=2, stride=2, dilation=1), + ResidualBlock(cs[3], cs[4], ks=3, stride=1, dilation=1), + ResidualBlock(cs[4], cs[4], ks=3, stride=1, dilation=1), + ) + self.avg_pool = spnn.GlobalAveragePooling() + self.classifier = nn.Sequential(nn.Linear(cs[4], kwargs['num_classes'])) + self.point_transforms = nn.ModuleList([ + nn.Sequential( + nn.Linear(cs[0], cs[4]), + nn.BatchNorm1d(cs[4]), + nn.ReLU(True), + ), + ]) + + self.weight_initialization() + self.dropout = nn.Dropout(0.3, True) + + def weight_initialization(self): + for m in self.modules(): + if isinstance(m, nn.BatchNorm1d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + def forward(self, x): + # x: SparseTensor z: PointTensor + z = PointTensor(x.F, x.C.float()) + + x0 = initial_voxelize(z, self.pres, self.vres) + + x0 = self.stem(x0) + z0 = voxel_to_point(x0, z, nearest=False) + z0.F = z0.F + + x1 = point_to_voxel(x0, z0) + x1 = self.stage1(x1) + x2 = self.stage2(x1) + x3 = self.stage3(x2) + x4 = self.stage4(x3) + z1 = voxel_to_point(x4, z0) + z1.F = z1.F + self.point_transforms[0](z0.F) + y1 = point_to_voxel(x4, z1) + pool = self.avg_pool(y1) + out = self.classifier(pool) + + + return out + + diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/leres/lib/spvcnn_utils.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/leres/lib/spvcnn_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..6d6d1ce388d51933a8c34c541eaa7bc58e3014bf --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/leres/lib/spvcnn_utils.py @@ -0,0 +1,105 @@ +import torchsparse.nn.functional as spf +from torchsparse.point_tensor import PointTensor +from torchsparse.utils.kernel_region import * +from 
torchsparse.utils.helpers import * + + +__all__ = ['initial_voxelize', 'point_to_voxel', 'voxel_to_point'] + + +# z: PointTensor +# return: SparseTensor +def initial_voxelize(z, init_res, after_res): + new_float_coord = torch.cat( + [(z.C[:, :3] * init_res) / after_res, z.C[:, -1].view(-1, 1)], 1) + + pc_hash = spf.sphash(torch.floor(new_float_coord).int()) + sparse_hash = torch.unique(pc_hash) + idx_query = spf.sphashquery(pc_hash, sparse_hash) + counts = spf.spcount(idx_query.int(), len(sparse_hash)) + + inserted_coords = spf.spvoxelize(torch.floor(new_float_coord), idx_query, + counts) + inserted_coords = torch.round(inserted_coords).int() + inserted_feat = spf.spvoxelize(z.F, idx_query, counts) + + new_tensor = SparseTensor(inserted_feat, inserted_coords, 1) + new_tensor.check() + z.additional_features['idx_query'][1] = idx_query + z.additional_features['counts'][1] = counts + z.C = new_float_coord + + return new_tensor + + +# x: SparseTensor, z: PointTensor +# return: SparseTensor +def point_to_voxel(x, z): + if z.additional_features is None or z.additional_features.get('idx_query') is None\ + or z.additional_features['idx_query'].get(x.s) is None: + #pc_hash = hash_gpu(torch.floor(z.C).int()) + pc_hash = spf.sphash( + torch.cat([ + torch.floor(z.C[:, :3] / x.s).int() * x.s, + z.C[:, -1].int().view(-1, 1) + ], 1)) + sparse_hash = spf.sphash(x.C) + idx_query = spf.sphashquery(pc_hash, sparse_hash) + counts = spf.spcount(idx_query.int(), x.C.shape[0]) + z.additional_features['idx_query'][x.s] = idx_query + z.additional_features['counts'][x.s] = counts + else: + idx_query = z.additional_features['idx_query'][x.s] + counts = z.additional_features['counts'][x.s] + + inserted_feat = spf.spvoxelize(z.F, idx_query, counts) + new_tensor = SparseTensor(inserted_feat, x.C, x.s) + new_tensor.coord_maps = x.coord_maps + new_tensor.kernel_maps = x.kernel_maps + + return new_tensor + + +# x: SparseTensor, z: PointTensor +# return: PointTensor +def voxel_to_point(x, z, nearest=False): + if z.idx_query is None or z.weights is None or z.idx_query.get( + x.s) is None or z.weights.get(x.s) is None: + kr = KernelRegion(2, x.s, 1) + off = kr.get_kernel_offset().to(z.F.device) + #old_hash = kernel_hash_gpu(torch.floor(z.C).int(), off) + old_hash = spf.sphash( + torch.cat([ + torch.floor(z.C[:, :3] / x.s).int() * x.s, + z.C[:, -1].int().view(-1, 1) + ], 1), off) + pc_hash = spf.sphash(x.C.to(z.F.device)) + idx_query = spf.sphashquery(old_hash, pc_hash) + weights = spf.calc_ti_weights(z.C, idx_query, + scale=x.s).transpose(0, 1).contiguous() + idx_query = idx_query.transpose(0, 1).contiguous() + if nearest: + weights[:, 1:] = 0. 
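+            # Nearest-voxel mode: zero the trilinear interpolation weights of all but
+            # the first queried kernel neighbour here, and invalidate their indices on
+            # the next line, so spdevoxelize falls back to a single-voxel lookup.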
+ idx_query[:, 1:] = -1 + new_feat = spf.spdevoxelize(x.F, idx_query, weights) + new_tensor = PointTensor(new_feat, + z.C, + idx_query=z.idx_query, + weights=z.weights) + new_tensor.additional_features = z.additional_features + new_tensor.idx_query[x.s] = idx_query + new_tensor.weights[x.s] = weights + z.idx_query[x.s] = idx_query + z.weights[x.s] = weights + + else: + new_feat = spf.spdevoxelize(x.F, z.idx_query.get(x.s), z.weights.get(x.s)) + new_tensor = PointTensor(new_feat, + z.C, + idx_query=z.idx_query, + weights=z.weights) + new_tensor.additional_features = z.additional_features + + return new_tensor + + diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/leres/lib/test_utils.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/leres/lib/test_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..2843629bb15f22e4168505bd2f0df9481a434749 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/leres/lib/test_utils.py @@ -0,0 +1,243 @@ +import os +import numpy as np +import torch +from torchsparse import SparseTensor +from torchsparse.utils import sparse_collate_fn, sparse_quantize +from plyfile import PlyData, PlyElement + + +def init_image_coor(height, width, u0=None, v0=None): + u0 = width / 2.0 if u0 is None else u0 + v0 = height / 2.0 if v0 is None else v0 + + x_row = np.arange(0, width) + x = np.tile(x_row, (height, 1)) + x = x.astype(np.float32) + u_u0 = x - u0 + + y_col = np.arange(0, height) + y = np.tile(y_col, (width, 1)).T + y = y.astype(np.float32) + v_v0 = y - v0 + return u_u0, v_v0 + +def depth_to_pcd(depth, u_u0, v_v0, f, invalid_value=0): + mask_invalid = depth <= invalid_value + depth[mask_invalid] = 0.0 + x = u_u0 / f * depth + y = v_v0 / f * depth + z = depth + pcd = np.stack([x, y, z], axis=2) + return pcd, ~mask_invalid + +def pcd_to_sparsetensor(pcd, mask_valid, voxel_size=0.01, num_points=100000): + pcd_valid = pcd[mask_valid] + block_ = pcd_valid + block = np.zeros_like(block_) + block[:, :3] = block_[:, :3] + + pc_ = np.round(block_[:, :3] / voxel_size) + pc_ -= pc_.min(0, keepdims=1) + feat_ = block + + # transfer point cloud to voxels + inds = sparse_quantize(pc_, + feat_, + return_index=True, + return_invs=False) + if len(inds) > num_points: + inds = np.random.choice(inds, num_points, replace=False) + + pc = pc_[inds] + feat = feat_[inds] + lidar = SparseTensor(feat, pc) + feed_dict = [{'lidar': lidar}] + inputs = sparse_collate_fn(feed_dict) + return inputs + +def pcd_uv_to_sparsetensor(pcd, u_u0, v_v0, mask_valid, f= 500.0, voxel_size=0.01, mask_side=None, num_points=100000): + if mask_side is not None: + mask_valid = mask_valid & mask_side + pcd_valid = pcd[mask_valid] + u_u0_valid = u_u0[mask_valid][:, np.newaxis] / f + v_v0_valid = v_v0[mask_valid][:, np.newaxis] / f + + block_ = np.concatenate([pcd_valid, u_u0_valid, v_v0_valid], axis=1) + block = np.zeros_like(block_) + block[:, :] = block_[:, :] + + + pc_ = np.round(block_[:, :3] / voxel_size) + pc_ -= pc_.min(0, keepdims=1) + feat_ = block + + # transfer point cloud to voxels + inds = sparse_quantize(pc_, + feat_, + return_index=True, + return_invs=False) + if len(inds) > num_points: + inds = np.random.choice(inds, num_points, replace=False) + + pc = pc_[inds] + feat = feat_[inds] + lidar = SparseTensor(feat, pc) + feed_dict = [{'lidar': lidar}] + inputs = sparse_collate_fn(feed_dict) + return inputs + + +def refine_focal_one_step(depth, focal, model, u0, v0): + # reconstruct PCD from depth + u_u0, v_v0 = 
init_image_coor(depth.shape[0], depth.shape[1], u0=u0, v0=v0) + pcd, mask_valid = depth_to_pcd(depth, u_u0, v_v0, f=focal, invalid_value=0) + # input for the voxelnet + feed_dict = pcd_uv_to_sparsetensor(pcd, u_u0, v_v0, mask_valid, f=focal, voxel_size=0.005, mask_side=None) + inputs = feed_dict['lidar'].cuda() + + outputs = model(inputs) + return outputs + +def refine_shift_one_step(depth_wshift, model, focal, u0, v0): + # reconstruct PCD from depth + u_u0, v_v0 = init_image_coor(depth_wshift.shape[0], depth_wshift.shape[1], u0=u0, v0=v0) + pcd_wshift, mask_valid = depth_to_pcd(depth_wshift, u_u0, v_v0, f=focal, invalid_value=0) + # input for the voxelnet + feed_dict = pcd_to_sparsetensor(pcd_wshift, mask_valid, voxel_size=0.01) + inputs = feed_dict['lidar'].cuda() + + outputs = model(inputs) + return outputs + +def refine_focal(depth, focal, model, u0, v0): + last_scale = 1 + focal_tmp = np.copy(focal) + for i in range(1): + scale = refine_focal_one_step(depth, focal_tmp, model, u0, v0) + focal_tmp = focal_tmp / scale.item() + last_scale = last_scale * scale + return torch.tensor([[last_scale]]) + +def refine_shift(depth_wshift, model, focal, u0, v0): + depth_wshift_tmp = np.copy(depth_wshift) + last_shift = 0 + for i in range(1): + shift = refine_shift_one_step(depth_wshift_tmp, model, focal, u0, v0) + shift = shift if shift.item() < 0.7 else torch.tensor([[0.7]]) + depth_wshift_tmp -= shift.item() + last_shift += shift.item() + return torch.tensor([[last_shift]]) + +def reconstruct_3D(depth, f): + """ + Reconstruct depth to 3D pointcloud with the provided focal length. + Return: + pcd: N X 3 array, point cloud + """ + cu = depth.shape[1] / 2 + cv = depth.shape[0] / 2 + width = depth.shape[1] + height = depth.shape[0] + row = np.arange(0, width, 1) + u = np.array([row for i in np.arange(height)]) + col = np.arange(0, height, 1) + v = np.array([col for i in np.arange(width)]) + v = v.transpose(1, 0) + + if f > 1e5: + print('Infinit focal length!!!') + x = u - cu + y = v - cv + z = depth / depth.max() * x.max() + else: + x = (u - cu) * depth / f + y = (v - cv) * depth / f + z = depth + + x = np.reshape(x, (width * height, 1)).astype(np.float) + y = np.reshape(y, (width * height, 1)).astype(np.float) + z = np.reshape(z, (width * height, 1)).astype(np.float) + pcd = np.concatenate((x, y, z), axis=1) + pcd = pcd.astype(np.int) + return pcd + +def save_point_cloud(pcd, rgb, filename, binary=True): + """Save an RGB point cloud as a PLY file. 
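+
+    A minimal usage sketch (array contents and output file name are purely
+    illustrative):
+
+        pcd = np.random.rand(1000, 3).astype(np.float32)             # XYZ
+        rgb = np.random.randint(0, 256, (1000, 3)).astype(np.uint8)  # colors
+        save_point_cloud(pcd, rgb, 'cloud.ply', binary=True)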
+ + :paras + @pcd: Nx3 matrix, the XYZ coordinates + @rgb: NX3 matrix, the rgb colors for each 3D point + """ + assert pcd.shape[0] == rgb.shape[0] + + if rgb is None: + gray_concat = np.tile(np.array([128], dtype=np.uint8), (pcd.shape[0], 3)) + points_3d = np.hstack((pcd, gray_concat)) + else: + points_3d = np.hstack((pcd, rgb)) + python_types = (float, float, float, int, int, int) + npy_types = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('red', 'u1'), ('green', 'u1'), + ('blue', 'u1')] + if binary is True: + # Format into NumPy structured array + vertices = [] + for row_idx in range(points_3d.shape[0]): + cur_point = points_3d[row_idx] + vertices.append(tuple(dtype(point) for dtype, point in zip(python_types, cur_point))) + vertices_array = np.array(vertices, dtype=npy_types) + el = PlyElement.describe(vertices_array, 'vertex') + + # Write + PlyData([el]).write(filename) + else: + x = np.squeeze(points_3d[:, 0]) + y = np.squeeze(points_3d[:, 1]) + z = np.squeeze(points_3d[:, 2]) + r = np.squeeze(points_3d[:, 3]) + g = np.squeeze(points_3d[:, 4]) + b = np.squeeze(points_3d[:, 5]) + + ply_head = 'ply\n' \ + 'format ascii 1.0\n' \ + 'element vertex %d\n' \ + 'property float x\n' \ + 'property float y\n' \ + 'property float z\n' \ + 'property uchar red\n' \ + 'property uchar green\n' \ + 'property uchar blue\n' \ + 'end_header' % r.shape[0] + # ---- Save ply data to disk + np.savetxt(filename, np.column_stack((x, y, z, r, g, b)), fmt="%d %d %d %d %d %d", header=ply_head, comments='') + +def reconstruct_depth(depth, rgb, dir, pcd_name, focal): + """ + para disp: disparity, [h, w] + para rgb: rgb image, [h, w, 3], in rgb format + """ + rgb = np.squeeze(rgb) + depth = np.squeeze(depth) + + mask = depth < 1e-8 + depth[mask] = 0 + depth = depth / depth.max() * 10000 + + pcd = reconstruct_3D(depth, f=focal) + rgb_n = np.reshape(rgb, (-1, 3)) + save_point_cloud(pcd, rgb_n, os.path.join(dir, pcd_name + '.ply')) + + +def recover_metric_depth(pred, gt): + if type(pred).__module__ == torch.__name__: + pred = pred.cpu().numpy() + if type(gt).__module__ == torch.__name__: + gt = gt.cpu().numpy() + gt = gt.squeeze() + pred = pred.squeeze() + mask = (gt > 1e-8) & (pred > 1e-8) + + gt_mask = gt[mask] + pred_mask = pred[mask] + a, b = np.polyfit(pred_mask, gt_mask, deg=1) + pred_metric = a * pred + b + return pred_metric diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/midas/backbones/beit.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/midas/backbones/beit.py new file mode 100644 index 0000000000000000000000000000000000000000..9d6d1e0150b51b99ac4560c479c57de4eb1f976c --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/midas/backbones/beit.py @@ -0,0 +1,203 @@ +import timm +import torch +import types + +import numpy as np +import torch.nn.functional as F + +from .utils import forward_adapted_unflatten, make_backbone_default +from timm.models.beit import gen_relative_position_index +from torch.utils.checkpoint import checkpoint +from typing import Optional + + +def forward_beit(pretrained, x): + return forward_adapted_unflatten(pretrained, x, "forward_features") + + +def patch_embed_forward(self, x): + """ + Modification of timm.models.layers.patch_embed.py: PatchEmbed.forward to support arbitrary window sizes. 
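+
+    Arbitrary sizes work because the fixed input-size checks of the stock forward are
+    not performed; the projection, optional flattening and norm are kept as-is. The
+    function is bound onto the existing PatchEmbed instance in _make_beit_backbone
+    further down in this file:
+
+        backbone.model.patch_embed.forward = types.MethodType(patch_embed_forward, backbone.model.patch_embed)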
+ """ + x = self.proj(x) + if self.flatten: + x = x.flatten(2).transpose(1, 2) + x = self.norm(x) + return x + + +def _get_rel_pos_bias(self, window_size): + """ + Modification of timm.models.beit.py: Attention._get_rel_pos_bias to support arbitrary window sizes. + """ + old_height = 2 * self.window_size[0] - 1 + old_width = 2 * self.window_size[1] - 1 + + new_height = 2 * window_size[0] - 1 + new_width = 2 * window_size[1] - 1 + + old_relative_position_bias_table = self.relative_position_bias_table + + old_num_relative_distance = self.num_relative_distance + new_num_relative_distance = new_height * new_width + 3 + + old_sub_table = old_relative_position_bias_table[:old_num_relative_distance - 3] + + old_sub_table = old_sub_table.reshape(1, old_width, old_height, -1).permute(0, 3, 1, 2) + new_sub_table = F.interpolate(old_sub_table, size=(new_height, new_width), mode="bilinear") + new_sub_table = new_sub_table.permute(0, 2, 3, 1).reshape(new_num_relative_distance - 3, -1) + + new_relative_position_bias_table = torch.cat( + [new_sub_table, old_relative_position_bias_table[old_num_relative_distance - 3:]]) + + key = str(window_size[1]) + "," + str(window_size[0]) + if key not in self.relative_position_indices.keys(): + self.relative_position_indices[key] = gen_relative_position_index(window_size) + + relative_position_bias = new_relative_position_bias_table[ + self.relative_position_indices[key].view(-1)].view( + window_size[0] * window_size[1] + 1, + window_size[0] * window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH + relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + return relative_position_bias.unsqueeze(0) + + +def attention_forward(self, x, resolution, shared_rel_pos_bias: Optional[torch.Tensor] = None): + """ + Modification of timm.models.beit.py: Attention.forward to support arbitrary window sizes. + """ + B, N, C = x.shape + + qkv_bias = torch.cat((self.q_bias, self.k_bias, self.v_bias)) if self.q_bias is not None else None + qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias) + qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) + q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple) + + q = q * self.scale + attn = (q @ k.transpose(-2, -1)) + + if self.relative_position_bias_table is not None: + window_size = tuple(np.array(resolution) // 16) + attn = attn + self._get_rel_pos_bias(window_size) + if shared_rel_pos_bias is not None: + attn = attn + shared_rel_pos_bias + + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, -1) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +def block_forward(self, x, resolution, shared_rel_pos_bias: Optional[torch.Tensor] = None): + """ + Modification of timm.models.beit.py: Block.forward to support arbitrary window sizes. 
+ """ + + if hasattr(self, 'drop_path1'): + drop_path_compat = self.drop_path1 + elif hasattr(self.target, 'drop_path'): + drop_path_compat = self.drop_path + else: + raise AttributeError("Neither drop_path1 nor drop_path exists on the target.") + + if self.gamma_1 is None: + x = x + drop_path_compat(self.attn(self.norm1(x), resolution, shared_rel_pos_bias=shared_rel_pos_bias)) + x = x + drop_path_compat(self.mlp(self.norm2(x))) + else: + x = x + drop_path_compat(self.gamma_1 * self.attn(self.norm1(x), resolution, + shared_rel_pos_bias=shared_rel_pos_bias)) + x = x + drop_path_compat(self.gamma_2 * self.mlp(self.norm2(x))) + return x + +def beit_forward_features(self, x): + """ + Modification of timm.models.beit.py: Beit.forward_features to support arbitrary window sizes. + """ + resolution = x.shape[2:] + + x = self.patch_embed(x) + x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1) + if self.pos_embed is not None: + x = x + self.pos_embed + x = self.pos_drop(x) + + rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None + for blk in self.blocks: + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint(blk, x, shared_rel_pos_bias=rel_pos_bias) + else: + x = blk(x, resolution, shared_rel_pos_bias=rel_pos_bias) + x = self.norm(x) + return x + + +def _make_beit_backbone( + model, + features=[96, 192, 384, 768], + size=[384, 384], + hooks=[0, 4, 8, 11], + vit_features=768, + use_readout="ignore", + start_index=1, + start_index_readout=1, +): + backbone = make_backbone_default(model, features, size, hooks, vit_features, use_readout, start_index, + start_index_readout) + + backbone.model.patch_embed.forward = types.MethodType(patch_embed_forward, backbone.model.patch_embed) + backbone.model.forward_features = types.MethodType(beit_forward_features, backbone.model) + + for block in backbone.model.blocks: + attn = block.attn + attn._get_rel_pos_bias = types.MethodType(_get_rel_pos_bias, attn) + attn.forward = types.MethodType(attention_forward, attn) + attn.relative_position_indices = {} + + block.forward = types.MethodType(block_forward, block) + + return backbone + + +def _make_pretrained_beitl16_512(pretrained, use_readout="ignore", hooks=None): + model = timm.create_model("beit_large_patch16_512", pretrained=pretrained) + + hooks = [5, 11, 17, 23] if hooks is None else hooks + + features = [256, 512, 1024, 1024] + + return _make_beit_backbone( + model, + features=features, + size=[512, 512], + hooks=hooks, + vit_features=1024, + use_readout=use_readout, + ) + + +def _make_pretrained_beitl16_384(pretrained, use_readout="ignore", hooks=None): + model = timm.create_model("beit_large_patch16_384", pretrained=pretrained) + + hooks = [5, 11, 17, 23] if hooks is None else hooks + return _make_beit_backbone( + model, + features=[256, 512, 1024, 1024], + hooks=hooks, + vit_features=1024, + use_readout=use_readout, + ) + + +def _make_pretrained_beitb16_384(pretrained, use_readout="ignore", hooks=None): + model = timm.create_model("beit_base_patch16_384", pretrained=pretrained) + + hooks = [2, 5, 8, 11] if hooks is None else hooks + return _make_beit_backbone( + model, + features=[96, 192, 384, 768], + hooks=hooks, + use_readout=use_readout, + ) diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/midas/backbones/levit.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/midas/backbones/levit.py new file mode 100644 index 
0000000000000000000000000000000000000000..6d023a98702a0451806d26f33f8bccf931814f10 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/midas/backbones/levit.py @@ -0,0 +1,106 @@ +import timm +import torch +import torch.nn as nn +import numpy as np + +from .utils import activations, get_activation, Transpose + + +def forward_levit(pretrained, x): + pretrained.model.forward_features(x) + + layer_1 = pretrained.activations["1"] + layer_2 = pretrained.activations["2"] + layer_3 = pretrained.activations["3"] + + layer_1 = pretrained.act_postprocess1(layer_1) + layer_2 = pretrained.act_postprocess2(layer_2) + layer_3 = pretrained.act_postprocess3(layer_3) + + return layer_1, layer_2, layer_3 + + +def _make_levit_backbone( + model, + hooks=[3, 11, 21], + patch_grid=[14, 14] +): + pretrained = nn.Module() + + pretrained.model = model + pretrained.model.blocks[hooks[0]].register_forward_hook(get_activation("1")) + pretrained.model.blocks[hooks[1]].register_forward_hook(get_activation("2")) + pretrained.model.blocks[hooks[2]].register_forward_hook(get_activation("3")) + + pretrained.activations = activations + + patch_grid_size = np.array(patch_grid, dtype=int) + + pretrained.act_postprocess1 = nn.Sequential( + Transpose(1, 2), + nn.Unflatten(2, torch.Size(patch_grid_size.tolist())) + ) + pretrained.act_postprocess2 = nn.Sequential( + Transpose(1, 2), + nn.Unflatten(2, torch.Size((np.ceil(patch_grid_size / 2).astype(int)).tolist())) + ) + pretrained.act_postprocess3 = nn.Sequential( + Transpose(1, 2), + nn.Unflatten(2, torch.Size((np.ceil(patch_grid_size / 4).astype(int)).tolist())) + ) + + return pretrained + + +class ConvTransposeNorm(nn.Sequential): + """ + Modification of + https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/levit.py: ConvNorm + such that ConvTranspose2d is used instead of Conv2d. + """ + + def __init__( + self, in_chs, out_chs, kernel_size=1, stride=1, pad=0, dilation=1, + groups=1, bn_weight_init=1): + super().__init__() + self.add_module('c', + nn.ConvTranspose2d(in_chs, out_chs, kernel_size, stride, pad, dilation, groups, bias=False)) + self.add_module('bn', nn.BatchNorm2d(out_chs)) + + nn.init.constant_(self.bn.weight, bn_weight_init) + + @torch.no_grad() + def fuse(self): + c, bn = self._modules.values() + w = bn.weight / (bn.running_var + bn.eps) ** 0.5 + w = c.weight * w[:, None, None, None] + b = bn.bias - bn.running_mean * bn.weight / (bn.running_var + bn.eps) ** 0.5 + m = nn.ConvTranspose2d( + w.size(1), w.size(0), w.shape[2:], stride=self.c.stride, + padding=self.c.padding, dilation=self.c.dilation, groups=self.c.groups) + m.weight.data.copy_(w) + m.bias.data.copy_(b) + return m + + +def stem_b4_transpose(in_chs, out_chs, activation): + """ + Modification of + https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/levit.py: stem_b16 + such that ConvTranspose2d is used instead of Conv2d and stem is also reduced to the half. 
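+
+    Concretely: two stride-2 ConvTransposeNorm stages (so 4x spatial upsampling
+    overall), with the second stage halving the channel count, i.e. half as many
+    stages as the stem it mirrors.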
+ """ + return nn.Sequential( + ConvTransposeNorm(in_chs, out_chs, 3, 2, 1), + activation(), + ConvTransposeNorm(out_chs, out_chs // 2, 3, 2, 1), + activation()) + + +def _make_pretrained_levit_384(pretrained, hooks=None): + model = timm.create_model("levit_384", pretrained=pretrained) + + hooks = [3, 11, 21] if hooks == None else hooks + return _make_levit_backbone( + model, + hooks=hooks + ) diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/midas/backbones/next_vit.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/midas/backbones/next_vit.py new file mode 100644 index 0000000000000000000000000000000000000000..8afdd8b743b5ab023a359dc3b721e601b1a40d11 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/midas/backbones/next_vit.py @@ -0,0 +1,39 @@ +import timm + +import torch.nn as nn + +from pathlib import Path +from .utils import activations, forward_default, get_activation + +from ..external.next_vit.classification.nextvit import * + + +def forward_next_vit(pretrained, x): + return forward_default(pretrained, x, "forward") + + +def _make_next_vit_backbone( + model, + hooks=[2, 6, 36, 39], +): + pretrained = nn.Module() + + pretrained.model = model + pretrained.model.features[hooks[0]].register_forward_hook(get_activation("1")) + pretrained.model.features[hooks[1]].register_forward_hook(get_activation("2")) + pretrained.model.features[hooks[2]].register_forward_hook(get_activation("3")) + pretrained.model.features[hooks[3]].register_forward_hook(get_activation("4")) + + pretrained.activations = activations + + return pretrained + + +def _make_pretrained_next_vit_large_6m(hooks=None): + model = timm.create_model("nextvit_large") + + hooks = [2, 6, 36, 39] if hooks == None else hooks + return _make_next_vit_backbone( + model, + hooks=hooks, + ) diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/midas/backbones/swin.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/midas/backbones/swin.py new file mode 100644 index 0000000000000000000000000000000000000000..f8c71367e3e78b087f80b2ab3e2f495a9c372f1a --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/midas/backbones/swin.py @@ -0,0 +1,13 @@ +import timm + +from .swin_common import _make_swin_backbone + + +def _make_pretrained_swinl12_384(pretrained, hooks=None): + model = timm.create_model("swin_large_patch4_window12_384", pretrained=pretrained) + + hooks = [1, 1, 17, 1] if hooks == None else hooks + return _make_swin_backbone( + model, + hooks=hooks + ) diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/midas/backbones/swin2.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/midas/backbones/swin2.py new file mode 100644 index 0000000000000000000000000000000000000000..ce4c8f1d6fc1807a207dc6b9a261c6f7b14a87a3 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/midas/backbones/swin2.py @@ -0,0 +1,34 @@ +import timm + +from .swin_common import _make_swin_backbone + + +def _make_pretrained_swin2l24_384(pretrained, hooks=None): + model = timm.create_model("swinv2_large_window12to24_192to384_22kft1k", pretrained=pretrained) + + hooks = [1, 1, 17, 1] if hooks == None else hooks + return _make_swin_backbone( + model, + hooks=hooks + ) + + +def _make_pretrained_swin2b24_384(pretrained, hooks=None): + model = timm.create_model("swinv2_base_window12to24_192to384_22kft1k", pretrained=pretrained) + + hooks = [1, 1, 17, 1] if hooks 
== None else hooks + return _make_swin_backbone( + model, + hooks=hooks + ) + + +def _make_pretrained_swin2t16_256(pretrained, hooks=None): + model = timm.create_model("swinv2_tiny_window16_256", pretrained=pretrained) + + hooks = [1, 1, 5, 1] if hooks == None else hooks + return _make_swin_backbone( + model, + hooks=hooks, + patch_grid=[64, 64] + ) diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/midas/backbones/swin_common.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/midas/backbones/swin_common.py new file mode 100644 index 0000000000000000000000000000000000000000..94d63d408f18511179d90b3ac6f697385d1e556d --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/midas/backbones/swin_common.py @@ -0,0 +1,52 @@ +import torch + +import torch.nn as nn +import numpy as np + +from .utils import activations, forward_default, get_activation, Transpose + + +def forward_swin(pretrained, x): + return forward_default(pretrained, x) + + +def _make_swin_backbone( + model, + hooks=[1, 1, 17, 1], + patch_grid=[96, 96] +): + pretrained = nn.Module() + + pretrained.model = model + pretrained.model.layers[0].blocks[hooks[0]].register_forward_hook(get_activation("1")) + pretrained.model.layers[1].blocks[hooks[1]].register_forward_hook(get_activation("2")) + pretrained.model.layers[2].blocks[hooks[2]].register_forward_hook(get_activation("3")) + pretrained.model.layers[3].blocks[hooks[3]].register_forward_hook(get_activation("4")) + + pretrained.activations = activations + + if hasattr(model, "patch_grid"): + used_patch_grid = model.patch_grid + else: + used_patch_grid = patch_grid + + patch_grid_size = np.array(used_patch_grid, dtype=int) + + pretrained.act_postprocess1 = nn.Sequential( + Transpose(1, 2), + nn.Unflatten(2, torch.Size(patch_grid_size.tolist())) + ) + pretrained.act_postprocess2 = nn.Sequential( + Transpose(1, 2), + nn.Unflatten(2, torch.Size((patch_grid_size // 2).tolist())) + ) + pretrained.act_postprocess3 = nn.Sequential( + Transpose(1, 2), + nn.Unflatten(2, torch.Size((patch_grid_size // 4).tolist())) + ) + pretrained.act_postprocess4 = nn.Sequential( + Transpose(1, 2), + nn.Unflatten(2, torch.Size((patch_grid_size // 8).tolist())) + ) + + return pretrained diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/midas/backbones/utils.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/midas/backbones/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..0558899dddcfccec5f01a764d4f21738eb612149 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/midas/backbones/utils.py @@ -0,0 +1,249 @@ +import torch + +import torch.nn as nn + + +class Slice(nn.Module): + def __init__(self, start_index=1): + super(Slice, self).__init__() + self.start_index = start_index + + def forward(self, x): + return x[:, self.start_index:] + + +class AddReadout(nn.Module): + def __init__(self, start_index=1): + super(AddReadout, self).__init__() + self.start_index = start_index + + def forward(self, x): + if self.start_index == 2: + readout = (x[:, 0] + x[:, 1]) / 2 + else: + readout = x[:, 0] + return x[:, self.start_index:] + readout.unsqueeze(1) + + +class ProjectReadout(nn.Module): + def __init__(self, in_features, start_index=1): + super(ProjectReadout, self).__init__() + self.start_index = start_index + + self.project = nn.Sequential(nn.Linear(2 * in_features, in_features), nn.GELU()) + + def forward(self, x): + readout = x[:, 
0].unsqueeze(1).expand_as(x[:, self.start_index:]) + features = torch.cat((x[:, self.start_index:], readout), -1) + + return self.project(features) + + +class Transpose(nn.Module): + def __init__(self, dim0, dim1): + super(Transpose, self).__init__() + self.dim0 = dim0 + self.dim1 = dim1 + + def forward(self, x): + x = x.transpose(self.dim0, self.dim1) + return x + + +activations = {} + + +def get_activation(name): + def hook(model, input, output): + activations[name] = output + + return hook + + +def forward_default(pretrained, x, function_name="forward_features"): + exec(f"pretrained.model.{function_name}(x)") + + layer_1 = pretrained.activations["1"] + layer_2 = pretrained.activations["2"] + layer_3 = pretrained.activations["3"] + layer_4 = pretrained.activations["4"] + + if hasattr(pretrained, "act_postprocess1"): + layer_1 = pretrained.act_postprocess1(layer_1) + if hasattr(pretrained, "act_postprocess2"): + layer_2 = pretrained.act_postprocess2(layer_2) + if hasattr(pretrained, "act_postprocess3"): + layer_3 = pretrained.act_postprocess3(layer_3) + if hasattr(pretrained, "act_postprocess4"): + layer_4 = pretrained.act_postprocess4(layer_4) + + return layer_1, layer_2, layer_3, layer_4 + + +def forward_adapted_unflatten(pretrained, x, function_name="forward_features"): + b, c, h, w = x.shape + + exec(f"glob = pretrained.model.{function_name}(x)") + + layer_1 = pretrained.activations["1"] + layer_2 = pretrained.activations["2"] + layer_3 = pretrained.activations["3"] + layer_4 = pretrained.activations["4"] + + layer_1 = pretrained.act_postprocess1[0:2](layer_1) + layer_2 = pretrained.act_postprocess2[0:2](layer_2) + layer_3 = pretrained.act_postprocess3[0:2](layer_3) + layer_4 = pretrained.act_postprocess4[0:2](layer_4) + + unflatten = nn.Sequential( + nn.Unflatten( + 2, + torch.Size( + [ + h // pretrained.model.patch_size[1], + w // pretrained.model.patch_size[0], + ] + ), + ) + ) + + if layer_1.ndim == 3: + layer_1 = unflatten(layer_1) + if layer_2.ndim == 3: + layer_2 = unflatten(layer_2) + if layer_3.ndim == 3: + layer_3 = unflatten(layer_3) + if layer_4.ndim == 3: + layer_4 = unflatten(layer_4) + + layer_1 = pretrained.act_postprocess1[3: len(pretrained.act_postprocess1)](layer_1) + layer_2 = pretrained.act_postprocess2[3: len(pretrained.act_postprocess2)](layer_2) + layer_3 = pretrained.act_postprocess3[3: len(pretrained.act_postprocess3)](layer_3) + layer_4 = pretrained.act_postprocess4[3: len(pretrained.act_postprocess4)](layer_4) + + return layer_1, layer_2, layer_3, layer_4 + + +def get_readout_oper(vit_features, features, use_readout, start_index=1): + if use_readout == "ignore": + readout_oper = [Slice(start_index)] * len(features) + elif use_readout == "add": + readout_oper = [AddReadout(start_index)] * len(features) + elif use_readout == "project": + readout_oper = [ + ProjectReadout(vit_features, start_index) for out_feat in features + ] + else: + assert ( + False + ), "wrong operation for readout token, use_readout can be 'ignore', 'add', or 'project'" + + return readout_oper + + +def make_backbone_default( + model, + features=[96, 192, 384, 768], + size=[384, 384], + hooks=[2, 5, 8, 11], + vit_features=768, + use_readout="ignore", + start_index=1, + start_index_readout=1, +): + pretrained = nn.Module() + + pretrained.model = model + pretrained.model.blocks[hooks[0]].register_forward_hook(get_activation("1")) + pretrained.model.blocks[hooks[1]].register_forward_hook(get_activation("2")) + pretrained.model.blocks[hooks[2]].register_forward_hook(get_activation("3")) + 
pretrained.model.blocks[hooks[3]].register_forward_hook(get_activation("4")) + + pretrained.activations = activations + + readout_oper = get_readout_oper(vit_features, features, use_readout, start_index_readout) + + # 32, 48, 136, 384 + pretrained.act_postprocess1 = nn.Sequential( + readout_oper[0], + Transpose(1, 2), + nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])), + nn.Conv2d( + in_channels=vit_features, + out_channels=features[0], + kernel_size=1, + stride=1, + padding=0, + ), + nn.ConvTranspose2d( + in_channels=features[0], + out_channels=features[0], + kernel_size=4, + stride=4, + padding=0, + bias=True, + dilation=1, + groups=1, + ), + ) + + pretrained.act_postprocess2 = nn.Sequential( + readout_oper[1], + Transpose(1, 2), + nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])), + nn.Conv2d( + in_channels=vit_features, + out_channels=features[1], + kernel_size=1, + stride=1, + padding=0, + ), + nn.ConvTranspose2d( + in_channels=features[1], + out_channels=features[1], + kernel_size=2, + stride=2, + padding=0, + bias=True, + dilation=1, + groups=1, + ), + ) + + pretrained.act_postprocess3 = nn.Sequential( + readout_oper[2], + Transpose(1, 2), + nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])), + nn.Conv2d( + in_channels=vit_features, + out_channels=features[2], + kernel_size=1, + stride=1, + padding=0, + ), + ) + + pretrained.act_postprocess4 = nn.Sequential( + readout_oper[3], + Transpose(1, 2), + nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])), + nn.Conv2d( + in_channels=vit_features, + out_channels=features[3], + kernel_size=1, + stride=1, + padding=0, + ), + nn.Conv2d( + in_channels=features[3], + out_channels=features[3], + kernel_size=3, + stride=2, + padding=1, + ), + ) + + pretrained.model.start_index = start_index + pretrained.model.patch_size = [16, 16] + + return pretrained diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/midas/backbones/vit.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/midas/backbones/vit.py new file mode 100644 index 0000000000000000000000000000000000000000..413f9693bd4548342280e329c9128c1a52cea920 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/midas/backbones/vit.py @@ -0,0 +1,221 @@ +import torch +import torch.nn as nn +import timm +import types +import math +import torch.nn.functional as F + +from .utils import (activations, forward_adapted_unflatten, get_activation, get_readout_oper, + make_backbone_default, Transpose) + + +def forward_vit(pretrained, x): + return forward_adapted_unflatten(pretrained, x, "forward_flex") + + +def _resize_pos_embed(self, posemb, gs_h, gs_w): + posemb_tok, posemb_grid = ( + posemb[:, : self.start_index], + posemb[0, self.start_index:], + ) + + gs_old = int(math.sqrt(len(posemb_grid))) + + posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2) + posemb_grid = F.interpolate(posemb_grid, size=(gs_h, gs_w), mode="bilinear") + posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_h * gs_w, -1) + + posemb = torch.cat([posemb_tok, posemb_grid], dim=1) + + return posemb + + +def forward_flex(self, x): + b, c, h, w = x.shape + + pos_embed = self._resize_pos_embed( + self.pos_embed, h // self.patch_size[1], w // self.patch_size[0] + ) + + B = x.shape[0] + + if hasattr(self.patch_embed, "backbone"): + x = self.patch_embed.backbone(x) + if isinstance(x, (list, tuple)): + x = x[-1] # last feature if backbone outputs list/tuple of features + + x = 
self.patch_embed.proj(x).flatten(2).transpose(1, 2) + + if getattr(self, "dist_token", None) is not None: + cls_tokens = self.cls_token.expand( + B, -1, -1 + ) # stole cls_tokens impl from Phil Wang, thanks + dist_token = self.dist_token.expand(B, -1, -1) + x = torch.cat((cls_tokens, dist_token, x), dim=1) + else: + if self.no_embed_class: + x = x + pos_embed + cls_tokens = self.cls_token.expand( + B, -1, -1 + ) # stole cls_tokens impl from Phil Wang, thanks + x = torch.cat((cls_tokens, x), dim=1) + + if not self.no_embed_class: + x = x + pos_embed + x = self.pos_drop(x) + + for blk in self.blocks: + x = blk(x) + + x = self.norm(x) + + return x + + +def _make_vit_b16_backbone( + model, + features=[96, 192, 384, 768], + size=[384, 384], + hooks=[2, 5, 8, 11], + vit_features=768, + use_readout="ignore", + start_index=1, + start_index_readout=1, +): + pretrained = make_backbone_default(model, features, size, hooks, vit_features, use_readout, start_index, + start_index_readout) + + # We inject this function into the VisionTransformer instances so that + # we can use it with interpolated position embeddings without modifying the library source. + pretrained.model.forward_flex = types.MethodType(forward_flex, pretrained.model) + pretrained.model._resize_pos_embed = types.MethodType( + _resize_pos_embed, pretrained.model + ) + + return pretrained + + +def _make_pretrained_vitl16_384(pretrained, use_readout="ignore", hooks=None): + model = timm.create_model("vit_large_patch16_384", pretrained=pretrained) + + hooks = [5, 11, 17, 23] if hooks == None else hooks + return _make_vit_b16_backbone( + model, + features=[256, 512, 1024, 1024], + hooks=hooks, + vit_features=1024, + use_readout=use_readout, + ) + + +def _make_pretrained_vitb16_384(pretrained, use_readout="ignore", hooks=None): + model = timm.create_model("vit_base_patch16_384", pretrained=pretrained) + + hooks = [2, 5, 8, 11] if hooks == None else hooks + return _make_vit_b16_backbone( + model, features=[96, 192, 384, 768], hooks=hooks, use_readout=use_readout + ) + + +def _make_vit_b_rn50_backbone( + model, + features=[256, 512, 768, 768], + size=[384, 384], + hooks=[0, 1, 8, 11], + vit_features=768, + patch_size=[16, 16], + number_stages=2, + use_vit_only=False, + use_readout="ignore", + start_index=1, +): + pretrained = nn.Module() + + pretrained.model = model + + used_number_stages = 0 if use_vit_only else number_stages + for s in range(used_number_stages): + pretrained.model.patch_embed.backbone.stages[s].register_forward_hook( + get_activation(str(s + 1)) + ) + for s in range(used_number_stages, 4): + pretrained.model.blocks[hooks[s]].register_forward_hook(get_activation(str(s + 1))) + + pretrained.activations = activations + + readout_oper = get_readout_oper(vit_features, features, use_readout, start_index) + + for s in range(used_number_stages): + value = nn.Sequential(nn.Identity(), nn.Identity(), nn.Identity()) + exec(f"pretrained.act_postprocess{s + 1}=value") + for s in range(used_number_stages, 4): + if s < number_stages: + final_layer = nn.ConvTranspose2d( + in_channels=features[s], + out_channels=features[s], + kernel_size=4 // (2 ** s), + stride=4 // (2 ** s), + padding=0, + bias=True, + dilation=1, + groups=1, + ) + elif s > number_stages: + final_layer = nn.Conv2d( + in_channels=features[3], + out_channels=features[3], + kernel_size=3, + stride=2, + padding=1, + ) + else: + final_layer = None + + layers = [ + readout_oper[s], + Transpose(1, 2), + nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])), + nn.Conv2d( + 
in_channels=vit_features, + out_channels=features[s], + kernel_size=1, + stride=1, + padding=0, + ), + ] + if final_layer is not None: + layers.append(final_layer) + + value = nn.Sequential(*layers) + exec(f"pretrained.act_postprocess{s + 1}=value") + + pretrained.model.start_index = start_index + pretrained.model.patch_size = patch_size + + # We inject this function into the VisionTransformer instances so that + # we can use it with interpolated position embeddings without modifying the library source. + pretrained.model.forward_flex = types.MethodType(forward_flex, pretrained.model) + + # We inject this function into the VisionTransformer instances so that + # we can use it with interpolated position embeddings without modifying the library source. + pretrained.model._resize_pos_embed = types.MethodType( + _resize_pos_embed, pretrained.model + ) + + return pretrained + + +def _make_pretrained_vitb_rn50_384( + pretrained, use_readout="ignore", hooks=None, use_vit_only=False +): + model = timm.create_model("vit_base_resnet50_384", pretrained=pretrained) + + hooks = [0, 1, 8, 11] if hooks == None else hooks + return _make_vit_b_rn50_backbone( + model, + features=[256, 512, 768, 768], + size=[384, 384], + hooks=hooks, + use_vit_only=use_vit_only, + use_readout=use_readout, + ) diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/midas/base_model.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/midas/base_model.py new file mode 100644 index 0000000000000000000000000000000000000000..5cf430239b47ec5ec07531263f26f5c24a2311cd --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/midas/base_model.py @@ -0,0 +1,16 @@ +import torch + + +class BaseModel(torch.nn.Module): + def load(self, path): + """Load model from file. 
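+
+        The checkpoint is loaded onto the CPU; if it is a full training checkpoint
+        (i.e. it contains an "optimizer" key), only its "model" entry is passed to
+        load_state_dict. A minimal usage sketch (the subclass and file path are
+        illustrative):
+
+            model = SomeMidasModel()  # any nn.Module subclass deriving from BaseModel
+            model.load("weights/model.pt")
+            model.eval()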
+ + Args: + path (str): file path + """ + parameters = torch.load(path, map_location=torch.device('cpu')) + + if "optimizer" in parameters: + parameters = parameters["model"] + + self.load_state_dict(parameters) diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/midas/blocks.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/midas/blocks.py new file mode 100644 index 0000000000000000000000000000000000000000..6d87a00680bb6ed9a6d7c3043ea30a1e90361794 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/midas/blocks.py @@ -0,0 +1,439 @@ +import torch +import torch.nn as nn + +from .backbones.beit import ( + _make_pretrained_beitl16_512, + _make_pretrained_beitl16_384, + _make_pretrained_beitb16_384, + forward_beit, +) +from .backbones.swin_common import ( + forward_swin, +) +from .backbones.swin2 import ( + _make_pretrained_swin2l24_384, + _make_pretrained_swin2b24_384, + _make_pretrained_swin2t16_256, +) +from .backbones.swin import ( + _make_pretrained_swinl12_384, +) +from .backbones.levit import ( + _make_pretrained_levit_384, + forward_levit, +) +from .backbones.vit import ( + _make_pretrained_vitb_rn50_384, + _make_pretrained_vitl16_384, + _make_pretrained_vitb16_384, + forward_vit, +) + +def _make_encoder(backbone, features, use_pretrained, groups=1, expand=False, exportable=True, hooks=None, + use_vit_only=False, use_readout="ignore", in_features=[96, 256, 512, 1024]): + if backbone == "beitl16_512": + pretrained = _make_pretrained_beitl16_512( + use_pretrained, hooks=hooks, use_readout=use_readout + ) + scratch = _make_scratch( + [256, 512, 1024, 1024], features, groups=groups, expand=expand + ) # BEiT_512-L (backbone) + elif backbone == "beitl16_384": + pretrained = _make_pretrained_beitl16_384( + use_pretrained, hooks=hooks, use_readout=use_readout + ) + scratch = _make_scratch( + [256, 512, 1024, 1024], features, groups=groups, expand=expand + ) # BEiT_384-L (backbone) + elif backbone == "beitb16_384": + pretrained = _make_pretrained_beitb16_384( + use_pretrained, hooks=hooks, use_readout=use_readout + ) + scratch = _make_scratch( + [96, 192, 384, 768], features, groups=groups, expand=expand + ) # BEiT_384-B (backbone) + elif backbone == "swin2l24_384": + pretrained = _make_pretrained_swin2l24_384( + use_pretrained, hooks=hooks + ) + scratch = _make_scratch( + [192, 384, 768, 1536], features, groups=groups, expand=expand + ) # Swin2-L/12to24 (backbone) + elif backbone == "swin2b24_384": + pretrained = _make_pretrained_swin2b24_384( + use_pretrained, hooks=hooks + ) + scratch = _make_scratch( + [128, 256, 512, 1024], features, groups=groups, expand=expand + ) # Swin2-B/12to24 (backbone) + elif backbone == "swin2t16_256": + pretrained = _make_pretrained_swin2t16_256( + use_pretrained, hooks=hooks + ) + scratch = _make_scratch( + [96, 192, 384, 768], features, groups=groups, expand=expand + ) # Swin2-T/16 (backbone) + elif backbone == "swinl12_384": + pretrained = _make_pretrained_swinl12_384( + use_pretrained, hooks=hooks + ) + scratch = _make_scratch( + [192, 384, 768, 1536], features, groups=groups, expand=expand + ) # Swin-L/12 (backbone) + elif backbone == "next_vit_large_6m": + from .backbones.next_vit import _make_pretrained_next_vit_large_6m + pretrained = _make_pretrained_next_vit_large_6m(hooks=hooks) + scratch = _make_scratch( + in_features, features, groups=groups, expand=expand + ) # Next-ViT-L on ImageNet-1K-6M (backbone) + elif backbone == "levit_384": + pretrained = _make_pretrained_levit_384( 
+ use_pretrained, hooks=hooks + ) + scratch = _make_scratch( + [384, 512, 768], features, groups=groups, expand=expand + ) # LeViT 384 (backbone) + elif backbone == "vitl16_384": + pretrained = _make_pretrained_vitl16_384( + use_pretrained, hooks=hooks, use_readout=use_readout + ) + scratch = _make_scratch( + [256, 512, 1024, 1024], features, groups=groups, expand=expand + ) # ViT-L/16 - 85.0% Top1 (backbone) + elif backbone == "vitb_rn50_384": + pretrained = _make_pretrained_vitb_rn50_384( + use_pretrained, + hooks=hooks, + use_vit_only=use_vit_only, + use_readout=use_readout, + ) + scratch = _make_scratch( + [256, 512, 768, 768], features, groups=groups, expand=expand + ) # ViT-H/16 - 85.0% Top1 (backbone) + elif backbone == "vitb16_384": + pretrained = _make_pretrained_vitb16_384( + use_pretrained, hooks=hooks, use_readout=use_readout + ) + scratch = _make_scratch( + [96, 192, 384, 768], features, groups=groups, expand=expand + ) # ViT-B/16 - 84.6% Top1 (backbone) + elif backbone == "resnext101_wsl": + pretrained = _make_pretrained_resnext101_wsl(use_pretrained) + scratch = _make_scratch([256, 512, 1024, 2048], features, groups=groups, expand=expand) # efficientnet_lite3 + elif backbone == "efficientnet_lite3": + pretrained = _make_pretrained_efficientnet_lite3(use_pretrained, exportable=exportable) + scratch = _make_scratch([32, 48, 136, 384], features, groups=groups, expand=expand) # efficientnet_lite3 + else: + print(f"Backbone '{backbone}' not implemented") + assert False + + return pretrained, scratch + + +def _make_scratch(in_shape, out_shape, groups=1, expand=False): + scratch = nn.Module() + + out_shape1 = out_shape + out_shape2 = out_shape + out_shape3 = out_shape + if len(in_shape) >= 4: + out_shape4 = out_shape + + if expand: + out_shape1 = out_shape + out_shape2 = out_shape*2 + out_shape3 = out_shape*4 + if len(in_shape) >= 4: + out_shape4 = out_shape*8 + + scratch.layer1_rn = nn.Conv2d( + in_shape[0], out_shape1, kernel_size=3, stride=1, padding=1, bias=False, groups=groups + ) + scratch.layer2_rn = nn.Conv2d( + in_shape[1], out_shape2, kernel_size=3, stride=1, padding=1, bias=False, groups=groups + ) + scratch.layer3_rn = nn.Conv2d( + in_shape[2], out_shape3, kernel_size=3, stride=1, padding=1, bias=False, groups=groups + ) + if len(in_shape) >= 4: + scratch.layer4_rn = nn.Conv2d( + in_shape[3], out_shape4, kernel_size=3, stride=1, padding=1, bias=False, groups=groups + ) + + return scratch + + +def _make_pretrained_efficientnet_lite3(use_pretrained, exportable=False): + efficientnet = torch.hub.load( + "rwightman/gen-efficientnet-pytorch", + "tf_efficientnet_lite3", + pretrained=use_pretrained, + exportable=exportable + ) + return _make_efficientnet_backbone(efficientnet) + + +def _make_efficientnet_backbone(effnet): + pretrained = nn.Module() + + pretrained.layer1 = nn.Sequential( + effnet.conv_stem, effnet.bn1, effnet.act1, *effnet.blocks[0:2] + ) + pretrained.layer2 = nn.Sequential(*effnet.blocks[2:3]) + pretrained.layer3 = nn.Sequential(*effnet.blocks[3:5]) + pretrained.layer4 = nn.Sequential(*effnet.blocks[5:9]) + + return pretrained + + +def _make_resnet_backbone(resnet): + pretrained = nn.Module() + pretrained.layer1 = nn.Sequential( + resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool, resnet.layer1 + ) + + pretrained.layer2 = resnet.layer2 + pretrained.layer3 = resnet.layer3 + pretrained.layer4 = resnet.layer4 + + return pretrained + + +def _make_pretrained_resnext101_wsl(use_pretrained): + resnet = torch.hub.load("facebookresearch/WSL-Images", 
"resnext101_32x8d_wsl") + return _make_resnet_backbone(resnet) + + + +class Interpolate(nn.Module): + """Interpolation module. + """ + + def __init__(self, scale_factor, mode, align_corners=False): + """Init. + + Args: + scale_factor (float): scaling + mode (str): interpolation mode + """ + super(Interpolate, self).__init__() + + self.interp = nn.functional.interpolate + self.scale_factor = scale_factor + self.mode = mode + self.align_corners = align_corners + + def forward(self, x): + """Forward pass. + + Args: + x (tensor): input + + Returns: + tensor: interpolated data + """ + + x = self.interp( + x, scale_factor=self.scale_factor, mode=self.mode, align_corners=self.align_corners + ) + + return x + + +class ResidualConvUnit(nn.Module): + """Residual convolution module. + """ + + def __init__(self, features): + """Init. + + Args: + features (int): number of features + """ + super().__init__() + + self.conv1 = nn.Conv2d( + features, features, kernel_size=3, stride=1, padding=1, bias=True + ) + + self.conv2 = nn.Conv2d( + features, features, kernel_size=3, stride=1, padding=1, bias=True + ) + + self.relu = nn.ReLU(inplace=True) + + def forward(self, x): + """Forward pass. + + Args: + x (tensor): input + + Returns: + tensor: output + """ + out = self.relu(x) + out = self.conv1(out) + out = self.relu(out) + out = self.conv2(out) + + return out + x + + +class FeatureFusionBlock(nn.Module): + """Feature fusion block. + """ + + def __init__(self, features): + """Init. + + Args: + features (int): number of features + """ + super(FeatureFusionBlock, self).__init__() + + self.resConfUnit1 = ResidualConvUnit(features) + self.resConfUnit2 = ResidualConvUnit(features) + + def forward(self, *xs): + """Forward pass. + + Returns: + tensor: output + """ + output = xs[0] + + if len(xs) == 2: + output += self.resConfUnit1(xs[1]) + + output = self.resConfUnit2(output) + + output = nn.functional.interpolate( + output, scale_factor=2, mode="bilinear", align_corners=True + ) + + return output + + + + +class ResidualConvUnit_custom(nn.Module): + """Residual convolution module. + """ + + def __init__(self, features, activation, bn): + """Init. + + Args: + features (int): number of features + """ + super().__init__() + + self.bn = bn + + self.groups=1 + + self.conv1 = nn.Conv2d( + features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups + ) + + self.conv2 = nn.Conv2d( + features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups + ) + + if self.bn==True: + self.bn1 = nn.BatchNorm2d(features) + self.bn2 = nn.BatchNorm2d(features) + + self.activation = activation + + self.skip_add = nn.quantized.FloatFunctional() + + def forward(self, x): + """Forward pass. + + Args: + x (tensor): input + + Returns: + tensor: output + """ + + out = self.activation(x) + out = self.conv1(out) + if self.bn==True: + out = self.bn1(out) + + out = self.activation(out) + out = self.conv2(out) + if self.bn==True: + out = self.bn2(out) + + if self.groups > 1: + out = self.conv_merge(out) + + return self.skip_add.add(out, x) + + # return out + x + + +class FeatureFusionBlock_custom(nn.Module): + """Feature fusion block. + """ + + def __init__(self, features, activation, deconv=False, bn=False, expand=False, align_corners=True, size=None): + """Init. 
+ + Args: + features (int): number of features + """ + super(FeatureFusionBlock_custom, self).__init__() + + self.deconv = deconv + self.align_corners = align_corners + + self.groups=1 + + self.expand = expand + out_features = features + if self.expand==True: + out_features = features//2 + + self.out_conv = nn.Conv2d(features, out_features, kernel_size=1, stride=1, padding=0, bias=True, groups=1) + + self.resConfUnit1 = ResidualConvUnit_custom(features, activation, bn) + self.resConfUnit2 = ResidualConvUnit_custom(features, activation, bn) + + self.skip_add = nn.quantized.FloatFunctional() + + self.size=size + + def forward(self, *xs, size=None): + """Forward pass. + + Returns: + tensor: output + """ + output = xs[0] + + if len(xs) == 2: + res = self.resConfUnit1(xs[1]) + output = self.skip_add.add(output, res) + # output += res + + output = self.resConfUnit2(output) + + if (size is None) and (self.size is None): + modifier = {"scale_factor": 2} + elif size is None: + modifier = {"size": self.size} + else: + modifier = {"size": size} + + output = nn.functional.interpolate( + output, **modifier, mode="bilinear", align_corners=self.align_corners + ) + + output = self.out_conv(output) + + return output + diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/midas/dpt_depth.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/midas/dpt_depth.py new file mode 100644 index 0000000000000000000000000000000000000000..3129d09cb43a7c79b23916236991fabbedb78f55 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/midas/dpt_depth.py @@ -0,0 +1,166 @@ +import torch +import torch.nn as nn + +from .base_model import BaseModel +from .blocks import ( + FeatureFusionBlock_custom, + Interpolate, + _make_encoder, + forward_beit, + forward_swin, + forward_levit, + forward_vit, +) +from .backbones.levit import stem_b4_transpose +from timm.models.layers import get_act_layer + + +def _make_fusion_block(features, use_bn, size = None): + return FeatureFusionBlock_custom( + features, + nn.ReLU(False), + deconv=False, + bn=use_bn, + expand=False, + align_corners=True, + size=size, + ) + + +class DPT(BaseModel): + def __init__( + self, + head, + features=256, + backbone="vitb_rn50_384", + readout="project", + channels_last=False, + use_bn=False, + **kwargs + ): + + super(DPT, self).__init__() + + self.channels_last = channels_last + + # For the Swin, Swin 2, LeViT and Next-ViT Transformers, the hierarchical architectures prevent setting the + # hooks freely. Instead, the hooks have to be chosen according to the ranges specified in the comments. 
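For readers unfamiliar with how these backbones expose intermediate features: each hook index in the table below selects a block whose output is captured with a PyTorch forward hook (the get_activation helper used by the backbone builders elsewhere in this diff). A minimal, self-contained sketch of that pattern, using a toy stack of linear layers rather than a real ViT:

import torch
import torch.nn as nn

activations = {}

def get_activation(name):
    # store the module's output under `name` every time it runs
    def hook(module, inputs, output):
        activations[name] = output
    return hook

# toy "backbone": four linear blocks standing in for transformer blocks
blocks = nn.ModuleList([nn.Linear(8, 8) for _ in range(4)])
hooks = [0, 1, 2, 3]                      # block indices to tap
for i, idx in enumerate(hooks):
    blocks[idx].register_forward_hook(get_activation(str(i + 1)))

x = torch.randn(2, 8)
for blk in blocks:
    x = blk(x)

print(sorted(activations))                # ['1', '2', '3', '4']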
+ hooks = { + "beitl16_512": [5, 11, 17, 23], + "beitl16_384": [5, 11, 17, 23], + "beitb16_384": [2, 5, 8, 11], + "swin2l24_384": [1, 1, 17, 1], # Allowed ranges: [0, 1], [0, 1], [ 0, 17], [ 0, 1] + "swin2b24_384": [1, 1, 17, 1], # [0, 1], [0, 1], [ 0, 17], [ 0, 1] + "swin2t16_256": [1, 1, 5, 1], # [0, 1], [0, 1], [ 0, 5], [ 0, 1] + "swinl12_384": [1, 1, 17, 1], # [0, 1], [0, 1], [ 0, 17], [ 0, 1] + "next_vit_large_6m": [2, 6, 36, 39], # [0, 2], [3, 6], [ 7, 36], [37, 39] + "levit_384": [3, 11, 21], # [0, 3], [6, 11], [14, 21] + "vitb_rn50_384": [0, 1, 8, 11], + "vitb16_384": [2, 5, 8, 11], + "vitl16_384": [5, 11, 17, 23], + }[backbone] + + if "next_vit" in backbone: + in_features = { + "next_vit_large_6m": [96, 256, 512, 1024], + }[backbone] + else: + in_features = None + + # Instantiate backbone and reassemble blocks + self.pretrained, self.scratch = _make_encoder( + backbone, + features, + False, # Set to true of you want to train from scratch, uses ImageNet weights + groups=1, + expand=False, + exportable=False, + hooks=hooks, + use_readout=readout, + in_features=in_features, + ) + + self.number_layers = len(hooks) if hooks is not None else 4 + size_refinenet3 = None + self.scratch.stem_transpose = None + + if "beit" in backbone: + self.forward_transformer = forward_beit + elif "swin" in backbone: + self.forward_transformer = forward_swin + elif "next_vit" in backbone: + from .backbones.next_vit import forward_next_vit + self.forward_transformer = forward_next_vit + elif "levit" in backbone: + self.forward_transformer = forward_levit + size_refinenet3 = 7 + self.scratch.stem_transpose = stem_b4_transpose(256, 128, get_act_layer("hard_swish")) + else: + self.forward_transformer = forward_vit + + self.scratch.refinenet1 = _make_fusion_block(features, use_bn) + self.scratch.refinenet2 = _make_fusion_block(features, use_bn) + self.scratch.refinenet3 = _make_fusion_block(features, use_bn, size_refinenet3) + if self.number_layers >= 4: + self.scratch.refinenet4 = _make_fusion_block(features, use_bn) + + self.scratch.output_conv = head + + + def forward(self, x): + if self.channels_last == True: + x.contiguous(memory_format=torch.channels_last) + + layers = self.forward_transformer(self.pretrained, x) + if self.number_layers == 3: + layer_1, layer_2, layer_3 = layers + else: + layer_1, layer_2, layer_3, layer_4 = layers + + layer_1_rn = self.scratch.layer1_rn(layer_1) + layer_2_rn = self.scratch.layer2_rn(layer_2) + layer_3_rn = self.scratch.layer3_rn(layer_3) + if self.number_layers >= 4: + layer_4_rn = self.scratch.layer4_rn(layer_4) + + if self.number_layers == 3: + path_3 = self.scratch.refinenet3(layer_3_rn, size=layer_2_rn.shape[2:]) + else: + path_4 = self.scratch.refinenet4(layer_4_rn, size=layer_3_rn.shape[2:]) + path_3 = self.scratch.refinenet3(path_4, layer_3_rn, size=layer_2_rn.shape[2:]) + path_2 = self.scratch.refinenet2(path_3, layer_2_rn, size=layer_1_rn.shape[2:]) + path_1 = self.scratch.refinenet1(path_2, layer_1_rn) + + if self.scratch.stem_transpose is not None: + path_1 = self.scratch.stem_transpose(path_1) + + out = self.scratch.output_conv(path_1) + + return out + + +class DPTDepthModel(DPT): + def __init__(self, path=None, non_negative=True, **kwargs): + features = kwargs["features"] if "features" in kwargs else 256 + head_features_1 = kwargs["head_features_1"] if "head_features_1" in kwargs else features + head_features_2 = kwargs["head_features_2"] if "head_features_2" in kwargs else 32 + kwargs.pop("head_features_1", None) + kwargs.pop("head_features_2", None) + + 
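The DPT forward pass above fuses encoder features coarse-to-fine: each refinenet upsamples the deeper path to the resolution of the next skip feature and combines the two. A standalone sketch of that skip-fusion idea with plain tensors (the shapes and the simple addition are illustrative only, not the exact FeatureFusionBlock_custom behaviour):

import torch
import torch.nn.functional as F

def fuse(deeper, skip):
    # upsample the coarser path to the skip's spatial size, then merge
    deeper = F.interpolate(deeper, size=skip.shape[2:], mode="bilinear", align_corners=True)
    return deeper + skip

layer_4 = torch.randn(1, 256, 12, 12)     # coarsest encoder feature
layer_3 = torch.randn(1, 256, 24, 24)
layer_2 = torch.randn(1, 256, 48, 48)
layer_1 = torch.randn(1, 256, 96, 96)     # finest encoder feature

path = layer_4
for skip in (layer_3, layer_2, layer_1):
    path = fuse(path, skip)

print(path.shape)                          # torch.Size([1, 256, 96, 96])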
head = nn.Sequential( + nn.Conv2d(head_features_1, head_features_1 // 2, kernel_size=3, stride=1, padding=1), + Interpolate(scale_factor=2, mode="bilinear", align_corners=True), + nn.Conv2d(head_features_1 // 2, head_features_2, kernel_size=3, stride=1, padding=1), + nn.ReLU(True), + nn.Conv2d(head_features_2, 1, kernel_size=1, stride=1, padding=0), + nn.ReLU(True) if non_negative else nn.Identity(), + nn.Identity(), + ) + + super().__init__(head, **kwargs) + + if path is not None: + self.load(path) + + def forward(self, x): + return super().forward(x).squeeze(dim=1) diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/midas/midas_net.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/midas/midas_net.py new file mode 100644 index 0000000000000000000000000000000000000000..8a954977800b0a0f48807e80fa63041910e33c1f --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/midas/midas_net.py @@ -0,0 +1,76 @@ +"""MidashNet: Network for monocular depth estimation trained by mixing several datasets. +This file contains code that is adapted from +https://github.com/thomasjpfan/pytorch_refinenet/blob/master/pytorch_refinenet/refinenet/refinenet_4cascade.py +""" +import torch +import torch.nn as nn + +from .base_model import BaseModel +from .blocks import FeatureFusionBlock, Interpolate, _make_encoder + + +class MidasNet(BaseModel): + """Network for monocular depth estimation. + """ + + def __init__(self, path=None, features=256, non_negative=True): + """Init. + + Args: + path (str, optional): Path to saved model. Defaults to None. + features (int, optional): Number of features. Defaults to 256. + backbone (str, optional): Backbone network for encoder. Defaults to resnet50 + """ + print("Loading weights: ", path) + + super(MidasNet, self).__init__() + + use_pretrained = False if path is None else True + + self.pretrained, self.scratch = _make_encoder(backbone="resnext101_wsl", features=features, use_pretrained=use_pretrained) + + self.scratch.refinenet4 = FeatureFusionBlock(features) + self.scratch.refinenet3 = FeatureFusionBlock(features) + self.scratch.refinenet2 = FeatureFusionBlock(features) + self.scratch.refinenet1 = FeatureFusionBlock(features) + + self.scratch.output_conv = nn.Sequential( + nn.Conv2d(features, 128, kernel_size=3, stride=1, padding=1), + Interpolate(scale_factor=2, mode="bilinear"), + nn.Conv2d(128, 32, kernel_size=3, stride=1, padding=1), + nn.ReLU(True), + nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0), + nn.ReLU(True) if non_negative else nn.Identity(), + ) + + if path: + self.load(path) + + def forward(self, x): + """Forward pass. 
+ + Args: + x (tensor): input data (image) + + Returns: + tensor: depth + """ + + layer_1 = self.pretrained.layer1(x) + layer_2 = self.pretrained.layer2(layer_1) + layer_3 = self.pretrained.layer3(layer_2) + layer_4 = self.pretrained.layer4(layer_3) + + layer_1_rn = self.scratch.layer1_rn(layer_1) + layer_2_rn = self.scratch.layer2_rn(layer_2) + layer_3_rn = self.scratch.layer3_rn(layer_3) + layer_4_rn = self.scratch.layer4_rn(layer_4) + + path_4 = self.scratch.refinenet4(layer_4_rn) + path_3 = self.scratch.refinenet3(path_4, layer_3_rn) + path_2 = self.scratch.refinenet2(path_3, layer_2_rn) + path_1 = self.scratch.refinenet1(path_2, layer_1_rn) + + out = self.scratch.output_conv(path_1) + + return torch.squeeze(out, dim=1) diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/midas/midas_net_custom.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/midas/midas_net_custom.py new file mode 100644 index 0000000000000000000000000000000000000000..50e4acb5e53d5fabefe3dde16ab49c33c2b7797c --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/midas/midas_net_custom.py @@ -0,0 +1,128 @@ +"""MidashNet: Network for monocular depth estimation trained by mixing several datasets. +This file contains code that is adapted from +https://github.com/thomasjpfan/pytorch_refinenet/blob/master/pytorch_refinenet/refinenet/refinenet_4cascade.py +""" +import torch +import torch.nn as nn + +from .base_model import BaseModel +from .blocks import FeatureFusionBlock, FeatureFusionBlock_custom, Interpolate, _make_encoder + + +class MidasNet_small(BaseModel): + """Network for monocular depth estimation. + """ + + def __init__(self, path=None, features=64, backbone="efficientnet_lite3", non_negative=True, exportable=True, channels_last=False, align_corners=True, + blocks={'expand': True}): + """Init. + + Args: + path (str, optional): Path to saved model. Defaults to None. + features (int, optional): Number of features. Defaults to 256. + backbone (str, optional): Backbone network for encoder. 
Defaults to resnet50 + """ + print("Loading weights: ", path) + + super(MidasNet_small, self).__init__() + + use_pretrained = False if path else True + + self.channels_last = channels_last + self.blocks = blocks + self.backbone = backbone + + self.groups = 1 + + features1=features + features2=features + features3=features + features4=features + self.expand = False + if "expand" in self.blocks and self.blocks['expand'] == True: + self.expand = True + features1=features + features2=features*2 + features3=features*4 + features4=features*8 + + self.pretrained, self.scratch = _make_encoder(self.backbone, features, use_pretrained, groups=self.groups, expand=self.expand, exportable=exportable) + + self.scratch.activation = nn.ReLU(False) + + self.scratch.refinenet4 = FeatureFusionBlock_custom(features4, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners) + self.scratch.refinenet3 = FeatureFusionBlock_custom(features3, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners) + self.scratch.refinenet2 = FeatureFusionBlock_custom(features2, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners) + self.scratch.refinenet1 = FeatureFusionBlock_custom(features1, self.scratch.activation, deconv=False, bn=False, align_corners=align_corners) + + + self.scratch.output_conv = nn.Sequential( + nn.Conv2d(features, features//2, kernel_size=3, stride=1, padding=1, groups=self.groups), + Interpolate(scale_factor=2, mode="bilinear"), + nn.Conv2d(features//2, 32, kernel_size=3, stride=1, padding=1), + self.scratch.activation, + nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0), + nn.ReLU(True) if non_negative else nn.Identity(), + nn.Identity(), + ) + + if path: + self.load(path) + + + def forward(self, x): + """Forward pass. 
+ + Args: + x (tensor): input data (image) + + Returns: + tensor: depth + """ + if self.channels_last==True: + print("self.channels_last = ", self.channels_last) + x.contiguous(memory_format=torch.channels_last) + + + layer_1 = self.pretrained.layer1(x) + layer_2 = self.pretrained.layer2(layer_1) + layer_3 = self.pretrained.layer3(layer_2) + layer_4 = self.pretrained.layer4(layer_3) + + layer_1_rn = self.scratch.layer1_rn(layer_1) + layer_2_rn = self.scratch.layer2_rn(layer_2) + layer_3_rn = self.scratch.layer3_rn(layer_3) + layer_4_rn = self.scratch.layer4_rn(layer_4) + + + path_4 = self.scratch.refinenet4(layer_4_rn) + path_3 = self.scratch.refinenet3(path_4, layer_3_rn) + path_2 = self.scratch.refinenet2(path_3, layer_2_rn) + path_1 = self.scratch.refinenet1(path_2, layer_1_rn) + + out = self.scratch.output_conv(path_1) + + return torch.squeeze(out, dim=1) + + + +def fuse_model(m): + prev_previous_type = nn.Identity() + prev_previous_name = '' + previous_type = nn.Identity() + previous_name = '' + for name, module in m.named_modules(): + if prev_previous_type == nn.Conv2d and previous_type == nn.BatchNorm2d and type(module) == nn.ReLU: + # print("FUSED ", prev_previous_name, previous_name, name) + torch.quantization.fuse_modules(m, [prev_previous_name, previous_name, name], inplace=True) + elif prev_previous_type == nn.Conv2d and previous_type == nn.BatchNorm2d: + # print("FUSED ", prev_previous_name, previous_name) + torch.quantization.fuse_modules(m, [prev_previous_name, previous_name], inplace=True) + # elif previous_type == nn.Conv2d and type(module) == nn.ReLU: + # print("FUSED ", previous_name, name) + # torch.quantization.fuse_modules(m, [previous_name, name], inplace=True) + + prev_previous_type = previous_type + prev_previous_name = previous_name + previous_type = type(module) + previous_name = name \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/midas/model_loader.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/midas/model_loader.py new file mode 100644 index 0000000000000000000000000000000000000000..f1cd1f2d43054bfd3d650587c7b2ed35f1347c9e --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/midas/model_loader.py @@ -0,0 +1,242 @@ +import cv2 +import torch + +from midas.dpt_depth import DPTDepthModel +from midas.midas_net import MidasNet +from midas.midas_net_custom import MidasNet_small +from midas.transforms import Resize, NormalizeImage, PrepareForNet + +from torchvision.transforms import Compose + +default_models = { + "dpt_beit_large_512": "weights/dpt_beit_large_512.pt", + "dpt_beit_large_384": "weights/dpt_beit_large_384.pt", + "dpt_beit_base_384": "weights/dpt_beit_base_384.pt", + "dpt_swin2_large_384": "weights/dpt_swin2_large_384.pt", + "dpt_swin2_base_384": "weights/dpt_swin2_base_384.pt", + "dpt_swin2_tiny_256": "weights/dpt_swin2_tiny_256.pt", + "dpt_swin_large_384": "weights/dpt_swin_large_384.pt", + "dpt_next_vit_large_384": "weights/dpt_next_vit_large_384.pt", + "dpt_levit_224": "weights/dpt_levit_224.pt", + "dpt_large_384": "weights/dpt_large_384.pt", + "dpt_hybrid_384": "weights/dpt_hybrid_384.pt", + "midas_v21_384": "weights/midas_v21_384.pt", + "midas_v21_small_256": "weights/midas_v21_small_256.pt", + "openvino_midas_v21_small_256": "weights/openvino_midas_v21_small_256.xml", +} + + +def load_model(device, model_path, model_type="dpt_large_384", optimize=True, height=None, square=False): + """Load the specified network. 
+ + Args: + device (device): the torch device used + model_path (str): path to saved model + model_type (str): the type of the model to be loaded + optimize (bool): optimize the model to half-integer on CUDA? + height (int): inference encoder image height + square (bool): resize to a square resolution? + + Returns: + The loaded network, the transform which prepares images as input to the network and the dimensions of the + network input + """ + if "openvino" in model_type: + from openvino.runtime import Core + + keep_aspect_ratio = not square + + if model_type == "dpt_beit_large_512": + model = DPTDepthModel( + path=model_path, + backbone="beitl16_512", + non_negative=True, + ) + net_w, net_h = 512, 512 + resize_mode = "minimal" + normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) + + elif model_type == "dpt_beit_large_384": + model = DPTDepthModel( + path=model_path, + backbone="beitl16_384", + non_negative=True, + ) + net_w, net_h = 384, 384 + resize_mode = "minimal" + normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) + + elif model_type == "dpt_beit_base_384": + model = DPTDepthModel( + path=model_path, + backbone="beitb16_384", + non_negative=True, + ) + net_w, net_h = 384, 384 + resize_mode = "minimal" + normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) + + elif model_type == "dpt_swin2_large_384": + model = DPTDepthModel( + path=model_path, + backbone="swin2l24_384", + non_negative=True, + ) + net_w, net_h = 384, 384 + keep_aspect_ratio = False + resize_mode = "minimal" + normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) + + elif model_type == "dpt_swin2_base_384": + model = DPTDepthModel( + path=model_path, + backbone="swin2b24_384", + non_negative=True, + ) + net_w, net_h = 384, 384 + keep_aspect_ratio = False + resize_mode = "minimal" + normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) + + elif model_type == "dpt_swin2_tiny_256": + model = DPTDepthModel( + path=model_path, + backbone="swin2t16_256", + non_negative=True, + ) + net_w, net_h = 256, 256 + keep_aspect_ratio = False + resize_mode = "minimal" + normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) + + elif model_type == "dpt_swin_large_384": + model = DPTDepthModel( + path=model_path, + backbone="swinl12_384", + non_negative=True, + ) + net_w, net_h = 384, 384 + keep_aspect_ratio = False + resize_mode = "minimal" + normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) + + elif model_type == "dpt_next_vit_large_384": + model = DPTDepthModel( + path=model_path, + backbone="next_vit_large_6m", + non_negative=True, + ) + net_w, net_h = 384, 384 + resize_mode = "minimal" + normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) + + # We change the notation from dpt_levit_224 (MiDaS notation) to levit_384 (timm notation) here, where the 224 refers + # to the resolution 224x224 used by LeViT and 384 is the first entry of the embed_dim, see _cfg and model_cfgs of + # https://github.com/rwightman/pytorch-image-models/blob/main/timm/models/levit.py + # (commit id: 927f031293a30afb940fff0bee34b85d9c059b0e) + elif model_type == "dpt_levit_224": + model = DPTDepthModel( + path=model_path, + backbone="levit_384", + non_negative=True, + head_features_1=64, + head_features_2=8, + ) + net_w, net_h = 224, 224 + keep_aspect_ratio = False + resize_mode = "minimal" + normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) + + elif model_type == 
"dpt_large_384": + model = DPTDepthModel( + path=model_path, + backbone="vitl16_384", + non_negative=True, + ) + net_w, net_h = 384, 384 + resize_mode = "minimal" + normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) + + elif model_type == "dpt_hybrid_384": + model = DPTDepthModel( + path=model_path, + backbone="vitb_rn50_384", + non_negative=True, + ) + net_w, net_h = 384, 384 + resize_mode = "minimal" + normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) + + elif model_type == "midas_v21_384": + model = MidasNet(model_path, non_negative=True) + net_w, net_h = 384, 384 + resize_mode = "upper_bound" + normalization = NormalizeImage( + mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] + ) + + elif model_type == "midas_v21_small_256": + model = MidasNet_small(model_path, features=64, backbone="efficientnet_lite3", exportable=True, + non_negative=True, blocks={'expand': True}) + net_w, net_h = 256, 256 + resize_mode = "upper_bound" + normalization = NormalizeImage( + mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] + ) + + elif model_type == "openvino_midas_v21_small_256": + ie = Core() + uncompiled_model = ie.read_model(model=model_path) + model = ie.compile_model(uncompiled_model, "CPU") + net_w, net_h = 256, 256 + resize_mode = "upper_bound" + normalization = NormalizeImage( + mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] + ) + + else: + print(f"model_type '{model_type}' not implemented, use: --model_type large") + assert False + + if not "openvino" in model_type: + print("Model loaded, number of parameters = {:.0f}M".format(sum(p.numel() for p in model.parameters()) / 1e6)) + else: + print("Model loaded, optimized with OpenVINO") + + if "openvino" in model_type: + keep_aspect_ratio = False + + if height is not None: + net_w, net_h = height, height + + transform = Compose( + [ + Resize( + net_w, + net_h, + resize_target=None, + keep_aspect_ratio=keep_aspect_ratio, + ensure_multiple_of=32, + resize_method=resize_mode, + image_interpolation_method=cv2.INTER_CUBIC, + ), + normalization, + PrepareForNet(), + ] + ) + + if not "openvino" in model_type: + model.eval() + + if optimize and (device == torch.device("cuda")): + if not "openvino" in model_type: + model = model.to(memory_format=torch.channels_last) + model = model.half() + else: + print("Error: OpenVINO models are already optimized. No optimization to half-float possible.") + exit() + + if not "openvino" in model_type: + model.to(device) + + return model, transform, net_w, net_h diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/midas/transforms.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/midas/transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..350cbc11662633ad7f8968eb10be2e7de6e384e9 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/midas/transforms.py @@ -0,0 +1,234 @@ +import numpy as np +import cv2 +import math + + +def apply_min_size(sample, size, image_interpolation_method=cv2.INTER_AREA): + """Rezise the sample to ensure the given size. Keeps aspect ratio. 
+ + Args: + sample (dict): sample + size (tuple): image size + + Returns: + tuple: new size + """ + shape = list(sample["disparity"].shape) + + if shape[0] >= size[0] and shape[1] >= size[1]: + return sample + + scale = [0, 0] + scale[0] = size[0] / shape[0] + scale[1] = size[1] / shape[1] + + scale = max(scale) + + shape[0] = math.ceil(scale * shape[0]) + shape[1] = math.ceil(scale * shape[1]) + + # resize + sample["image"] = cv2.resize( + sample["image"], tuple(shape[::-1]), interpolation=image_interpolation_method + ) + + sample["disparity"] = cv2.resize( + sample["disparity"], tuple(shape[::-1]), interpolation=cv2.INTER_NEAREST + ) + sample["mask"] = cv2.resize( + sample["mask"].astype(np.float32), + tuple(shape[::-1]), + interpolation=cv2.INTER_NEAREST, + ) + sample["mask"] = sample["mask"].astype(bool) + + return tuple(shape) + + +class Resize(object): + """Resize sample to given size (width, height). + """ + + def __init__( + self, + width, + height, + resize_target=True, + keep_aspect_ratio=False, + ensure_multiple_of=1, + resize_method="lower_bound", + image_interpolation_method=cv2.INTER_AREA, + ): + """Init. + + Args: + width (int): desired output width + height (int): desired output height + resize_target (bool, optional): + True: Resize the full sample (image, mask, target). + False: Resize image only. + Defaults to True. + keep_aspect_ratio (bool, optional): + True: Keep the aspect ratio of the input sample. + Output sample might not have the given width and height, and + resize behaviour depends on the parameter 'resize_method'. + Defaults to False. + ensure_multiple_of (int, optional): + Output width and height is constrained to be multiple of this parameter. + Defaults to 1. + resize_method (str, optional): + "lower_bound": Output will be at least as large as the given size. + "upper_bound": Output will be at max as large as the given size. (Output size might be smaller than given size.) + "minimal": Scale as least as possible. (Output size might be smaller than given size.) + Defaults to "lower_bound". 
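To make these resizing rules concrete: with keep_aspect_ratio=True, resize_method="upper_bound" and ensure_multiple_of=32 (the combination model_loader.py uses for the midas_v21 models), a 1920x1080 input is scaled by the smaller of the two ratios and each side is snapped to a multiple of 32. A simplified sketch of that arithmetic (it omits the min_val/max_val clamping that constrain_to_multiple_of performs):

import numpy as np

def upper_bound_size(width, height, target=384, multiple=32):
    # pick the smaller scale so neither side exceeds the target,
    # then snap each side to the nearest multiple of `multiple`
    scale = min(target / width, target / height)
    new_w = int(np.round(scale * width / multiple) * multiple)
    new_h = int(np.round(scale * height / multiple) * multiple)
    return new_w, new_h

print(upper_bound_size(1920, 1080))        # (384, 224)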
+ """ + self.__width = width + self.__height = height + + self.__resize_target = resize_target + self.__keep_aspect_ratio = keep_aspect_ratio + self.__multiple_of = ensure_multiple_of + self.__resize_method = resize_method + self.__image_interpolation_method = image_interpolation_method + + def constrain_to_multiple_of(self, x, min_val=0, max_val=None): + y = (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int) + + if max_val is not None and y > max_val: + y = (np.floor(x / self.__multiple_of) * self.__multiple_of).astype(int) + + if y < min_val: + y = (np.ceil(x / self.__multiple_of) * self.__multiple_of).astype(int) + + return y + + def get_size(self, width, height): + # determine new height and width + scale_height = self.__height / height + scale_width = self.__width / width + + if self.__keep_aspect_ratio: + if self.__resize_method == "lower_bound": + # scale such that output size is lower bound + if scale_width > scale_height: + # fit width + scale_height = scale_width + else: + # fit height + scale_width = scale_height + elif self.__resize_method == "upper_bound": + # scale such that output size is upper bound + if scale_width < scale_height: + # fit width + scale_height = scale_width + else: + # fit height + scale_width = scale_height + elif self.__resize_method == "minimal": + # scale as least as possbile + if abs(1 - scale_width) < abs(1 - scale_height): + # fit width + scale_height = scale_width + else: + # fit height + scale_width = scale_height + else: + raise ValueError( + f"resize_method {self.__resize_method} not implemented" + ) + + if self.__resize_method == "lower_bound": + new_height = self.constrain_to_multiple_of( + scale_height * height, min_val=self.__height + ) + new_width = self.constrain_to_multiple_of( + scale_width * width, min_val=self.__width + ) + elif self.__resize_method == "upper_bound": + new_height = self.constrain_to_multiple_of( + scale_height * height, max_val=self.__height + ) + new_width = self.constrain_to_multiple_of( + scale_width * width, max_val=self.__width + ) + elif self.__resize_method == "minimal": + new_height = self.constrain_to_multiple_of(scale_height * height) + new_width = self.constrain_to_multiple_of(scale_width * width) + else: + raise ValueError(f"resize_method {self.__resize_method} not implemented") + + return (new_width, new_height) + + def __call__(self, sample): + width, height = self.get_size( + sample["image"].shape[1], sample["image"].shape[0] + ) + + # resize sample + sample["image"] = cv2.resize( + sample["image"], + (width, height), + interpolation=self.__image_interpolation_method, + ) + + if self.__resize_target: + if "disparity" in sample: + sample["disparity"] = cv2.resize( + sample["disparity"], + (width, height), + interpolation=cv2.INTER_NEAREST, + ) + + if "depth" in sample: + sample["depth"] = cv2.resize( + sample["depth"], (width, height), interpolation=cv2.INTER_NEAREST + ) + + sample["mask"] = cv2.resize( + sample["mask"].astype(np.float32), + (width, height), + interpolation=cv2.INTER_NEAREST, + ) + sample["mask"] = sample["mask"].astype(bool) + + return sample + + +class NormalizeImage(object): + """Normlize image by given mean and std. + """ + + def __init__(self, mean, std): + self.__mean = mean + self.__std = std + + def __call__(self, sample): + sample["image"] = (sample["image"] - self.__mean) / self.__std + + return sample + + +class PrepareForNet(object): + """Prepare sample for usage as network input. 
+ """ + + def __init__(self): + pass + + def __call__(self, sample): + image = np.transpose(sample["image"], (2, 0, 1)) + sample["image"] = np.ascontiguousarray(image).astype(np.float32) + + if "mask" in sample: + sample["mask"] = sample["mask"].astype(np.float32) + sample["mask"] = np.ascontiguousarray(sample["mask"]) + + if "disparity" in sample: + disparity = sample["disparity"].astype(np.float32) + sample["disparity"] = np.ascontiguousarray(disparity) + + if "depth" in sample: + depth = sample["depth"].astype(np.float32) + sample["depth"] = np.ascontiguousarray(depth) + + return sample diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/midas/vit.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/midas/vit.py new file mode 100644 index 0000000000000000000000000000000000000000..ea46b1be88b261b0dec04f3da0256f5f66f88a74 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/midas/vit.py @@ -0,0 +1,491 @@ +import torch +import torch.nn as nn +import timm +import types +import math +import torch.nn.functional as F + + +class Slice(nn.Module): + def __init__(self, start_index=1): + super(Slice, self).__init__() + self.start_index = start_index + + def forward(self, x): + return x[:, self.start_index :] + + +class AddReadout(nn.Module): + def __init__(self, start_index=1): + super(AddReadout, self).__init__() + self.start_index = start_index + + def forward(self, x): + if self.start_index == 2: + readout = (x[:, 0] + x[:, 1]) / 2 + else: + readout = x[:, 0] + return x[:, self.start_index :] + readout.unsqueeze(1) + + +class ProjectReadout(nn.Module): + def __init__(self, in_features, start_index=1): + super(ProjectReadout, self).__init__() + self.start_index = start_index + + self.project = nn.Sequential(nn.Linear(2 * in_features, in_features), nn.GELU()) + + def forward(self, x): + readout = x[:, 0].unsqueeze(1).expand_as(x[:, self.start_index :]) + features = torch.cat((x[:, self.start_index :], readout), -1) + + return self.project(features) + + +class Transpose(nn.Module): + def __init__(self, dim0, dim1): + super(Transpose, self).__init__() + self.dim0 = dim0 + self.dim1 = dim1 + + def forward(self, x): + x = x.transpose(self.dim0, self.dim1) + return x + + +def forward_vit(pretrained, x): + b, c, h, w = x.shape + + glob = pretrained.model.forward_flex(x) + + layer_1 = pretrained.activations["1"] + layer_2 = pretrained.activations["2"] + layer_3 = pretrained.activations["3"] + layer_4 = pretrained.activations["4"] + + layer_1 = pretrained.act_postprocess1[0:2](layer_1) + layer_2 = pretrained.act_postprocess2[0:2](layer_2) + layer_3 = pretrained.act_postprocess3[0:2](layer_3) + layer_4 = pretrained.act_postprocess4[0:2](layer_4) + + unflatten = nn.Sequential( + nn.Unflatten( + 2, + torch.Size( + [ + h // pretrained.model.patch_size[1], + w // pretrained.model.patch_size[0], + ] + ), + ) + ) + + if layer_1.ndim == 3: + layer_1 = unflatten(layer_1) + if layer_2.ndim == 3: + layer_2 = unflatten(layer_2) + if layer_3.ndim == 3: + layer_3 = unflatten(layer_3) + if layer_4.ndim == 3: + layer_4 = unflatten(layer_4) + + layer_1 = pretrained.act_postprocess1[3 : len(pretrained.act_postprocess1)](layer_1) + layer_2 = pretrained.act_postprocess2[3 : len(pretrained.act_postprocess2)](layer_2) + layer_3 = pretrained.act_postprocess3[3 : len(pretrained.act_postprocess3)](layer_3) + layer_4 = pretrained.act_postprocess4[3 : len(pretrained.act_postprocess4)](layer_4) + + return layer_1, layer_2, layer_3, layer_4 + + +def 
_resize_pos_embed(self, posemb, gs_h, gs_w): + posemb_tok, posemb_grid = ( + posemb[:, : self.start_index], + posemb[0, self.start_index :], + ) + + gs_old = int(math.sqrt(len(posemb_grid))) + + posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2) + posemb_grid = F.interpolate(posemb_grid, size=(gs_h, gs_w), mode="bilinear") + posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_h * gs_w, -1) + + posemb = torch.cat([posemb_tok, posemb_grid], dim=1) + + return posemb + + +def forward_flex(self, x): + b, c, h, w = x.shape + + pos_embed = self._resize_pos_embed( + self.pos_embed, h // self.patch_size[1], w // self.patch_size[0] + ) + + B = x.shape[0] + + if hasattr(self.patch_embed, "backbone"): + x = self.patch_embed.backbone(x) + if isinstance(x, (list, tuple)): + x = x[-1] # last feature if backbone outputs list/tuple of features + + x = self.patch_embed.proj(x).flatten(2).transpose(1, 2) + + if getattr(self, "dist_token", None) is not None: + cls_tokens = self.cls_token.expand( + B, -1, -1 + ) # stole cls_tokens impl from Phil Wang, thanks + dist_token = self.dist_token.expand(B, -1, -1) + x = torch.cat((cls_tokens, dist_token, x), dim=1) + else: + cls_tokens = self.cls_token.expand( + B, -1, -1 + ) # stole cls_tokens impl from Phil Wang, thanks + x = torch.cat((cls_tokens, x), dim=1) + + x = x + pos_embed + x = self.pos_drop(x) + + for blk in self.blocks: + x = blk(x) + + x = self.norm(x) + + return x + + +activations = {} + + +def get_activation(name): + def hook(model, input, output): + activations[name] = output + + return hook + + +def get_readout_oper(vit_features, features, use_readout, start_index=1): + if use_readout == "ignore": + readout_oper = [Slice(start_index)] * len(features) + elif use_readout == "add": + readout_oper = [AddReadout(start_index)] * len(features) + elif use_readout == "project": + readout_oper = [ + ProjectReadout(vit_features, start_index) for out_feat in features + ] + else: + assert ( + False + ), "wrong operation for readout token, use_readout can be 'ignore', 'add', or 'project'" + + return readout_oper + + +def _make_vit_b16_backbone( + model, + features=[96, 192, 384, 768], + size=[384, 384], + hooks=[2, 5, 8, 11], + vit_features=768, + use_readout="ignore", + start_index=1, +): + pretrained = nn.Module() + + pretrained.model = model + pretrained.model.blocks[hooks[0]].register_forward_hook(get_activation("1")) + pretrained.model.blocks[hooks[1]].register_forward_hook(get_activation("2")) + pretrained.model.blocks[hooks[2]].register_forward_hook(get_activation("3")) + pretrained.model.blocks[hooks[3]].register_forward_hook(get_activation("4")) + + pretrained.activations = activations + + readout_oper = get_readout_oper(vit_features, features, use_readout, start_index) + + # 32, 48, 136, 384 + pretrained.act_postprocess1 = nn.Sequential( + readout_oper[0], + Transpose(1, 2), + nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])), + nn.Conv2d( + in_channels=vit_features, + out_channels=features[0], + kernel_size=1, + stride=1, + padding=0, + ), + nn.ConvTranspose2d( + in_channels=features[0], + out_channels=features[0], + kernel_size=4, + stride=4, + padding=0, + bias=True, + dilation=1, + groups=1, + ), + ) + + pretrained.act_postprocess2 = nn.Sequential( + readout_oper[1], + Transpose(1, 2), + nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])), + nn.Conv2d( + in_channels=vit_features, + out_channels=features[1], + kernel_size=1, + stride=1, + padding=0, + ), + nn.ConvTranspose2d( + 
in_channels=features[1], + out_channels=features[1], + kernel_size=2, + stride=2, + padding=0, + bias=True, + dilation=1, + groups=1, + ), + ) + + pretrained.act_postprocess3 = nn.Sequential( + readout_oper[2], + Transpose(1, 2), + nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])), + nn.Conv2d( + in_channels=vit_features, + out_channels=features[2], + kernel_size=1, + stride=1, + padding=0, + ), + ) + + pretrained.act_postprocess4 = nn.Sequential( + readout_oper[3], + Transpose(1, 2), + nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])), + nn.Conv2d( + in_channels=vit_features, + out_channels=features[3], + kernel_size=1, + stride=1, + padding=0, + ), + nn.Conv2d( + in_channels=features[3], + out_channels=features[3], + kernel_size=3, + stride=2, + padding=1, + ), + ) + + pretrained.model.start_index = start_index + pretrained.model.patch_size = [16, 16] + + # We inject this function into the VisionTransformer instances so that + # we can use it with interpolated position embeddings without modifying the library source. + pretrained.model.forward_flex = types.MethodType(forward_flex, pretrained.model) + pretrained.model._resize_pos_embed = types.MethodType( + _resize_pos_embed, pretrained.model + ) + + return pretrained + + +def _make_pretrained_vitl16_384(pretrained, use_readout="ignore", hooks=None): + model = timm.create_model("vit_large_patch16_384", pretrained=pretrained) + + hooks = [5, 11, 17, 23] if hooks == None else hooks + return _make_vit_b16_backbone( + model, + features=[256, 512, 1024, 1024], + hooks=hooks, + vit_features=1024, + use_readout=use_readout, + ) + + +def _make_pretrained_vitb16_384(pretrained, use_readout="ignore", hooks=None): + model = timm.create_model("vit_base_patch16_384", pretrained=pretrained) + + hooks = [2, 5, 8, 11] if hooks == None else hooks + return _make_vit_b16_backbone( + model, features=[96, 192, 384, 768], hooks=hooks, use_readout=use_readout + ) + + +def _make_pretrained_deitb16_384(pretrained, use_readout="ignore", hooks=None): + model = timm.create_model("vit_deit_base_patch16_384", pretrained=pretrained) + + hooks = [2, 5, 8, 11] if hooks == None else hooks + return _make_vit_b16_backbone( + model, features=[96, 192, 384, 768], hooks=hooks, use_readout=use_readout + ) + + +def _make_pretrained_deitb16_distil_384(pretrained, use_readout="ignore", hooks=None): + model = timm.create_model( + "vit_deit_base_distilled_patch16_384", pretrained=pretrained + ) + + hooks = [2, 5, 8, 11] if hooks == None else hooks + return _make_vit_b16_backbone( + model, + features=[96, 192, 384, 768], + hooks=hooks, + use_readout=use_readout, + start_index=2, + ) + + +def _make_vit_b_rn50_backbone( + model, + features=[256, 512, 768, 768], + size=[384, 384], + hooks=[0, 1, 8, 11], + vit_features=768, + use_vit_only=False, + use_readout="ignore", + start_index=1, +): + pretrained = nn.Module() + + pretrained.model = model + + if use_vit_only == True: + pretrained.model.blocks[hooks[0]].register_forward_hook(get_activation("1")) + pretrained.model.blocks[hooks[1]].register_forward_hook(get_activation("2")) + else: + pretrained.model.patch_embed.backbone.stages[0].register_forward_hook( + get_activation("1") + ) + pretrained.model.patch_embed.backbone.stages[1].register_forward_hook( + get_activation("2") + ) + + pretrained.model.blocks[hooks[2]].register_forward_hook(get_activation("3")) + pretrained.model.blocks[hooks[3]].register_forward_hook(get_activation("4")) + + pretrained.activations = activations + + readout_oper = 
get_readout_oper(vit_features, features, use_readout, start_index) + + if use_vit_only == True: + pretrained.act_postprocess1 = nn.Sequential( + readout_oper[0], + Transpose(1, 2), + nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])), + nn.Conv2d( + in_channels=vit_features, + out_channels=features[0], + kernel_size=1, + stride=1, + padding=0, + ), + nn.ConvTranspose2d( + in_channels=features[0], + out_channels=features[0], + kernel_size=4, + stride=4, + padding=0, + bias=True, + dilation=1, + groups=1, + ), + ) + + pretrained.act_postprocess2 = nn.Sequential( + readout_oper[1], + Transpose(1, 2), + nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])), + nn.Conv2d( + in_channels=vit_features, + out_channels=features[1], + kernel_size=1, + stride=1, + padding=0, + ), + nn.ConvTranspose2d( + in_channels=features[1], + out_channels=features[1], + kernel_size=2, + stride=2, + padding=0, + bias=True, + dilation=1, + groups=1, + ), + ) + else: + pretrained.act_postprocess1 = nn.Sequential( + nn.Identity(), nn.Identity(), nn.Identity() + ) + pretrained.act_postprocess2 = nn.Sequential( + nn.Identity(), nn.Identity(), nn.Identity() + ) + + pretrained.act_postprocess3 = nn.Sequential( + readout_oper[2], + Transpose(1, 2), + nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])), + nn.Conv2d( + in_channels=vit_features, + out_channels=features[2], + kernel_size=1, + stride=1, + padding=0, + ), + ) + + pretrained.act_postprocess4 = nn.Sequential( + readout_oper[3], + Transpose(1, 2), + nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])), + nn.Conv2d( + in_channels=vit_features, + out_channels=features[3], + kernel_size=1, + stride=1, + padding=0, + ), + nn.Conv2d( + in_channels=features[3], + out_channels=features[3], + kernel_size=3, + stride=2, + padding=1, + ), + ) + + pretrained.model.start_index = start_index + pretrained.model.patch_size = [16, 16] + + # We inject this function into the VisionTransformer instances so that + # we can use it with interpolated position embeddings without modifying the library source. + pretrained.model.forward_flex = types.MethodType(forward_flex, pretrained.model) + + # We inject this function into the VisionTransformer instances so that + # we can use it with interpolated position embeddings without modifying the library source. 
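The injection mentioned in this comment (and used repeatedly in these backbone builders) relies on types.MethodType, which binds a plain function to a single instance without touching the class. A minimal standalone sketch with a made-up class:

import types

class Greeter:
    def __init__(self, name):
        self.name = name

def shout(self):
    # once bound, `self` refers to the instance as in a normal method
    return self.name.upper()

g = Greeter("midas")
g.shout = types.MethodType(shout, g)       # bind to this one instance only
print(g.shout())                           # MIDAS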
+ pretrained.model._resize_pos_embed = types.MethodType( + _resize_pos_embed, pretrained.model + ) + + return pretrained + + +def _make_pretrained_vitb_rn50_384( + pretrained, use_readout="ignore", hooks=None, use_vit_only=False +): + model = timm.create_model("vit_base_resnet50_384", pretrained=pretrained) + + hooks = [0, 1, 8, 11] if hooks == None else hooks + return _make_vit_b_rn50_backbone( + model, + features=[256, 512, 768, 768], + size=[384, 384], + hooks=hooks, + use_vit_only=use_vit_only, + use_readout=use_readout, + ) diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/model_io.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/model_io.py new file mode 100644 index 0000000000000000000000000000000000000000..e4c76a8ccebb18f1dabc8b7aca1399af64d775d0 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/model_io.py @@ -0,0 +1,70 @@ +import os + +import torch + + +def save_weights(model, filename, path="./saved_models"): + os.makedirs(path, exist_ok=True) + + fpath = os.path.join(path, filename) + torch.save(model.state_dict(), fpath) + return + +def save_checkpoint(model, optimizer, epoch, filename, root="./checkpoints"): + if not os.path.isdir(root): + os.makedirs(root) + + fpath = os.path.join(root, filename) + torch.save( + { + "model": model.state_dict(), + "optimizer": optimizer.state_dict(), + "epoch": epoch + } + , fpath) + +def load_weights(model, filename, path="./saved_models"): + fpath = os.path.join(path, filename) + state_dict = torch.load(fpath) + model.load_state_dict(state_dict) + return model + +def load_checkpoint(fpath, model, optimizer=None): + ckpt = torch.load(fpath, map_location='cpu') + if ckpt is None: + raise Exception(f"\nERROR Loading AdaBins_nyu.pt. Read this for a fix:\nhttps://github.com/deforum-art/deforum-for-automatic1111-webui/wiki/FAQ-&-Troubleshooting#3d-animation-mode-is-not-working-only-2d-works") + if optimizer is None: + optimizer = ckpt.get('optimizer', None) + else: + optimizer.load_state_dict(ckpt['optimizer']) + epoch = ckpt['epoch'] + + if 'model' in ckpt: + ckpt = ckpt['model'] + load_dict = {} + for k, v in ckpt.items(): + if k.startswith('module.'): + k_ = k.replace('module.', '') + load_dict[k_] = v + else: + load_dict[k] = v + + modified = {} # backward compatibility to older naming of architecture blocks + for k, v in load_dict.items(): + if k.startswith('adaptive_bins_layer.embedding_conv.'): + k_ = k.replace('adaptive_bins_layer.embedding_conv.', + 'adaptive_bins_layer.conv3x3.') + modified[k_] = v + # del load_dict[k] + + elif k.startswith('adaptive_bins_layer.patch_transformer.embedding_encoder'): + + k_ = k.replace('adaptive_bins_layer.patch_transformer.embedding_encoder', + 'adaptive_bins_layer.patch_transformer.embedding_convPxP') + modified[k_] = v + # del load_dict[k] + else: + modified[k] = v # else keep the original + + model.load_state_dict(modified) + return model, optimizer, epoch \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/py3d_tools.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/py3d_tools.py new file mode 100644 index 0000000000000000000000000000000000000000..5eb958607c4fd405a06bb67e33963e744fd2306f --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/py3d_tools.py @@ -0,0 +1,1801 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. 
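Stepping back to load_checkpoint in model_io.py above: the 'module.' stripping it performs is the standard fix for checkpoints saved from a DataParallel-wrapped model, whose state_dict keys carry a prefix the bare model does not expect. A minimal sketch of that remapping with a hypothetical checkpoint:

import torch
import torch.nn as nn

model = nn.Linear(4, 2)

# hypothetical checkpoint saved from a DataParallel-wrapped model:
# every key carries a "module." prefix that the bare model does not expect
ckpt = {"module." + k: v for k, v in model.state_dict().items()}

cleaned = {k[len("module."):] if k.startswith("module.") else k: v
           for k, v in ckpt.items()}
model.load_state_dict(cleaned)             # loads cleanly, no "unexpected key" errors
print(sorted(cleaned))                     # ['bias', 'weight']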
+# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import sys +import math +import warnings +from typing import List, Optional, Sequence, Tuple, Union, Any + +import numpy as np +import torch +import torch.nn.functional as F + +import copy +import inspect +import torch.nn as nn + +Device = Union[str, torch.device] + +# Default values for rotation and translation matrices. +_R = torch.eye(3)[None] # (1, 3, 3) +_T = torch.zeros(1, 3) # (1, 3) + + +# Provide get_origin and get_args even in Python 3.7. + +if sys.version_info >= (3, 8, 0): + from typing import get_args, get_origin +elif sys.version_info >= (3, 7, 0): + + def get_origin(cls): # pragma: no cover + return getattr(cls, "__origin__", None) + + def get_args(cls): # pragma: no cover + return getattr(cls, "__args__", None) + + +else: + raise ImportError("This module requires Python 3.7+") + +################################################################ +## ██████╗██╗ █████╗ ███████╗███████╗███████╗███████╗ ## +## ██╔════╝██║ ██╔══██╗██╔════╝██╔════╝██╔════╝██╔════╝ ## +## ██║ ██║ ███████║███████╗███████╗█████╗ ███████╗ ## +## ██║ ██║ ██╔══██║╚════██║╚════██║██╔══╝ ╚════██║ ## +## ╚██████╗███████╗██║ ██║███████║███████║███████╗███████║ ## +## ╚═════╝╚══════╝╚═╝ ╚═╝╚══════╝╚══════╝╚══════╝╚══════╝ ## +################################################################ + +class Transform3d: + """ + A Transform3d object encapsulates a batch of N 3D transformations, and knows + how to transform points and normal vectors. Suppose that t is a Transform3d; + then we can do the following: + + .. code-block:: python + + N = len(t) + points = torch.randn(N, P, 3) + normals = torch.randn(N, P, 3) + points_transformed = t.transform_points(points) # => (N, P, 3) + normals_transformed = t.transform_normals(normals) # => (N, P, 3) + + + BROADCASTING + Transform3d objects supports broadcasting. Suppose that t1 and tN are + Transform3d objects with len(t1) == 1 and len(tN) == N respectively. Then we + can broadcast transforms like this: + + .. code-block:: python + + t1.transform_points(torch.randn(P, 3)) # => (P, 3) + t1.transform_points(torch.randn(1, P, 3)) # => (1, P, 3) + t1.transform_points(torch.randn(M, P, 3)) # => (M, P, 3) + tN.transform_points(torch.randn(P, 3)) # => (N, P, 3) + tN.transform_points(torch.randn(1, P, 3)) # => (N, P, 3) + + + COMBINING TRANSFORMS + Transform3d objects can be combined in two ways: composing and stacking. + Composing is function composition. Given Transform3d objects t1, t2, t3, + the following all compute the same thing: + + .. code-block:: python + + y1 = t3.transform_points(t2.transform_points(t1.transform_points(x))) + y2 = t1.compose(t2).compose(t3).transform_points(x) + y3 = t1.compose(t2, t3).transform_points(x) + + + Composing transforms should broadcast. + + .. code-block:: python + + if len(t1) == 1 and len(t2) == N, then len(t1.compose(t2)) == N. + + We can also stack a sequence of Transform3d objects, which represents + composition along the batch dimension; then the following should compute the + same thing. + + .. code-block:: python + + N, M = len(tN), len(tM) + xN = torch.randn(N, P, 3) + xM = torch.randn(M, P, 3) + y1 = torch.cat([tN.transform_points(xN), tM.transform_points(xM)], dim=0) + y2 = tN.stack(tM).transform_points(torch.cat([xN, xM], dim=0)) + + BUILDING TRANSFORMS + We provide convenience methods for easily building Transform3d objects + as compositions of basic transforms. + + .. 
code-block:: python + + # Scale by 0.5, then translate by (1, 2, 3) + t1 = Transform3d().scale(0.5).translate(1, 2, 3) + + # Scale each axis by a different amount, then translate, then scale + t2 = Transform3d().scale(1, 3, 3).translate(2, 3, 1).scale(2.0) + + t3 = t1.compose(t2) + tN = t1.stack(t3, t3) + + + BACKPROP THROUGH TRANSFORMS + When building transforms, we can also parameterize them by Torch tensors; + in this case we can backprop through the construction and application of + Transform objects, so they could be learned via gradient descent or + predicted by a neural network. + + .. code-block:: python + + s1_params = torch.randn(N, requires_grad=True) + t_params = torch.randn(N, 3, requires_grad=True) + s2_params = torch.randn(N, 3, requires_grad=True) + + t = Transform3d().scale(s1_params).translate(t_params).scale(s2_params) + x = torch.randn(N, 3) + y = t.transform_points(x) + loss = compute_loss(y) + loss.backward() + + with torch.no_grad(): + s1_params -= lr * s1_params.grad + t_params -= lr * t_params.grad + s2_params -= lr * s2_params.grad + + CONVENTIONS + We adopt a right-hand coordinate system, meaning that rotation about an axis + with a positive angle results in a counter clockwise rotation. + + This class assumes that transformations are applied on inputs which + are row vectors. The internal representation of the Nx4x4 transformation + matrix is of the form: + + .. code-block:: python + + M = [ + [Rxx, Ryx, Rzx, 0], + [Rxy, Ryy, Rzy, 0], + [Rxz, Ryz, Rzz, 0], + [Tx, Ty, Tz, 1], + ] + + To apply the transformation to points which are row vectors, the M matrix + can be pre multiplied by the points: + + .. code-block:: python + + points = [[0, 1, 2]] # (1 x 3) xyz coordinates of a point + transformed_points = points * M + + """ + + def __init__( + self, + dtype: torch.dtype = torch.float32, + device: Device = "cpu", + matrix: Optional[torch.Tensor] = None, + ) -> None: + """ + Args: + dtype: The data type of the transformation matrix. + to be used if `matrix = None`. + device: The device for storing the implemented transformation. + If `matrix != None`, uses the device of input `matrix`. + matrix: A tensor of shape (4, 4) or of shape (minibatch, 4, 4) + representing the 4x4 3D transformation matrix. + If `None`, initializes with identity using + the specified `device` and `dtype`. + """ + + if matrix is None: + self._matrix = torch.eye(4, dtype=dtype, device=device).view(1, 4, 4) + else: + if matrix.ndim not in (2, 3): + raise ValueError('"matrix" has to be a 2- or a 3-dimensional tensor.') + if matrix.shape[-2] != 4 or matrix.shape[-1] != 4: + raise ValueError( + '"matrix" has to be a tensor of shape (minibatch, 4, 4)' + ) + # set dtype and device from matrix + dtype = matrix.dtype + device = matrix.device + self._matrix = matrix.view(-1, 4, 4) + + self._transforms = [] # store transforms to compose + self._lu = None + self.device = make_device(device) + self.dtype = dtype + + def __len__(self) -> int: + return self.get_matrix().shape[0] + + def __getitem__( + self, index: Union[int, List[int], slice, torch.Tensor] + ) -> "Transform3d": + """ + Args: + index: Specifying the index of the transform to retrieve. + Can be an int, slice, list of ints, boolean, long tensor. + Supports negative indices. + + Returns: + Transform3d object with selected transforms. The tensors are not cloned. 
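+
+ For illustration only, a minimal sketch of indexing (the batch of four
+ identity transforms and the variable names are purely illustrative):
+
+ .. code-block:: python
+
+ t = Transform3d(matrix=torch.eye(4).repeat(4, 1, 1)) # len(t) == 4
+ t_one = t[1] # Transform3d with matrix of shape (1, 4, 4)
+ t_two = t[[0, 2]] # Transform3d with matrix of shape (2, 4, 4)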
+ """ + if isinstance(index, int): + index = [index] + return self.__class__(matrix=self.get_matrix()[index]) + + def compose(self, *others: "Transform3d") -> "Transform3d": + """ + Return a new Transform3d representing the composition of self with the + given other transforms, which will be stored as an internal list. + + Args: + *others: Any number of Transform3d objects + + Returns: + A new Transform3d with the stored transforms + """ + out = Transform3d(dtype=self.dtype, device=self.device) + out._matrix = self._matrix.clone() + for other in others: + if not isinstance(other, Transform3d): + msg = "Only possible to compose Transform3d objects; got %s" + raise ValueError(msg % type(other)) + out._transforms = self._transforms + list(others) + return out + + def get_matrix(self) -> torch.Tensor: + """ + Return a matrix which is the result of composing this transform + with others stored in self.transforms. Where necessary transforms + are broadcast against each other. + For example, if self.transforms contains transforms t1, t2, and t3, and + given a set of points x, the following should be true: + + .. code-block:: python + + y1 = t1.compose(t2, t3).transform(x) + y2 = t3.transform(t2.transform(t1.transform(x))) + y1.get_matrix() == y2.get_matrix() + + Returns: + A transformation matrix representing the composed inputs. + """ + composed_matrix = self._matrix.clone() + if len(self._transforms) > 0: + for other in self._transforms: + other_matrix = other.get_matrix() + composed_matrix = _broadcast_bmm(composed_matrix, other_matrix) + return composed_matrix + + def _get_matrix_inverse(self) -> torch.Tensor: + """ + Return the inverse of self._matrix. + """ + return torch.inverse(self._matrix) + + def inverse(self, invert_composed: bool = False) -> "Transform3d": + """ + Returns a new Transform3d object that represents an inverse of the + current transformation. + + Args: + invert_composed: + - True: First compose the list of stored transformations + and then apply inverse to the result. This is + potentially slower for classes of transformations + with inverses that can be computed efficiently + (e.g. rotations and translations). + - False: Invert the individual stored transformations + independently without composing them. + + Returns: + A new Transform3d object containing the inverse of the original + transformation. + """ + + tinv = Transform3d(dtype=self.dtype, device=self.device) + + if invert_composed: + # first compose then invert + tinv._matrix = torch.inverse(self.get_matrix()) + else: + # self._get_matrix_inverse() implements efficient inverse + # of self._matrix + i_matrix = self._get_matrix_inverse() + + # 2 cases: + if len(self._transforms) > 0: + # a) Either we have a non-empty list of transforms: + # Here we take self._matrix and append its inverse at the + # end of the reverted _transforms list. After composing + # the transformations with get_matrix(), this correctly + # right-multiplies by the inverse of self._matrix + # at the end of the composition. + tinv._transforms = [t.inverse() for t in reversed(self._transforms)] + last = Transform3d(dtype=self.dtype, device=self.device) + last._matrix = i_matrix + tinv._transforms.append(last) + else: + # b) Or there are no stored transformations + # we just set inverted matrix + tinv._matrix = i_matrix + + return tinv + + def stack(self, *others: "Transform3d") -> "Transform3d": + """ + Return a new batched Transform3d representing the batch elements from + self and all the given other transforms all batched together. 
+ + Args: + *others: Any number of Transform3d objects + + Returns: + A new Transform3d. + """ + transforms = [self] + list(others) + matrix = torch.cat([t.get_matrix() for t in transforms], dim=0) + out = Transform3d(dtype=self.dtype, device=self.device) + out._matrix = matrix + return out + + def transform_points(self, points, eps: Optional[float] = None) -> torch.Tensor: + """ + Use this transform to transform a set of 3D points. Assumes row major + ordering of the input points. + + Args: + points: Tensor of shape (P, 3) or (N, P, 3) + eps: If eps!=None, the argument is used to clamp the + last coordinate before performing the final division. + The clamping corresponds to: + last_coord := (last_coord.sign() + (last_coord==0)) * + torch.clamp(last_coord.abs(), eps), + i.e. the last coordinates that are exactly 0 will + be clamped to +eps. + + Returns: + points_out: points of shape (N, P, 3) or (P, 3) depending + on the dimensions of the transform + """ + points_batch = points.clone() + if points_batch.dim() == 2: + points_batch = points_batch[None] # (P, 3) -> (1, P, 3) + if points_batch.dim() != 3: + msg = "Expected points to have dim = 2 or dim = 3: got shape %r" + raise ValueError(msg % repr(points.shape)) + + N, P, _3 = points_batch.shape + ones = torch.ones(N, P, 1, dtype=points.dtype, device=points.device) + points_batch = torch.cat([points_batch, ones], dim=2) + + composed_matrix = self.get_matrix() + points_out = _broadcast_bmm(points_batch, composed_matrix) + denom = points_out[..., 3:] # denominator + if eps is not None: + denom_sign = denom.sign() + (denom == 0.0).type_as(denom) + denom = denom_sign * torch.clamp(denom.abs(), eps) + points_out = points_out[..., :3] / denom + + # When transform is (1, 4, 4) and points is (P, 3) return + # points_out of shape (P, 3) + if points_out.shape[0] == 1 and points.dim() == 2: + points_out = points_out.reshape(points.shape) + + return points_out + + def transform_normals(self, normals) -> torch.Tensor: + """ + Use this transform to transform a set of normal vectors. + + Args: + normals: Tensor of shape (P, 3) or (N, P, 3) + + Returns: + normals_out: Tensor of shape (P, 3) or (N, P, 3) depending + on the dimensions of the transform + """ + if normals.dim() not in [2, 3]: + msg = "Expected normals to have dim = 2 or dim = 3: got shape %r" + raise ValueError(msg % (normals.shape,)) + composed_matrix = self.get_matrix() + + # TODO: inverse is bad! Solve a linear system instead + mat = composed_matrix[:, :3, :3] + normals_out = _broadcast_bmm(normals, mat.transpose(1, 2).inverse()) + + # This doesn't pass unit tests. 
TODO investigate further + # if self._lu is None: + # self._lu = self._matrix[:, :3, :3].transpose(1, 2).lu() + # normals_out = normals.lu_solve(*self._lu) + + # When transform is (1, 4, 4) and normals is (P, 3) return + # normals_out of shape (P, 3) + if normals_out.shape[0] == 1 and normals.dim() == 2: + normals_out = normals_out.reshape(normals.shape) + + return normals_out + + def translate(self, *args, **kwargs) -> "Transform3d": + return self.compose( + Translate(device=self.device, dtype=self.dtype, *args, **kwargs) + ) + + def scale(self, *args, **kwargs) -> "Transform3d": + return self.compose( + Scale(device=self.device, dtype=self.dtype, *args, **kwargs) + ) + + def rotate(self, *args, **kwargs) -> "Transform3d": + return self.compose( + Rotate(device=self.device, dtype=self.dtype, *args, **kwargs) + ) + + def rotate_axis_angle(self, *args, **kwargs) -> "Transform3d": + return self.compose( + RotateAxisAngle(device=self.device, dtype=self.dtype, *args, **kwargs) + ) + + def clone(self) -> "Transform3d": + """ + Deep copy of Transforms object. All internal tensors are cloned + individually. + + Returns: + new Transforms object. + """ + other = Transform3d(dtype=self.dtype, device=self.device) + if self._lu is not None: + other._lu = [elem.clone() for elem in self._lu] + other._matrix = self._matrix.clone() + other._transforms = [t.clone() for t in self._transforms] + return other + + def to( + self, + device: Device, + copy: bool = False, + dtype: Optional[torch.dtype] = None, + ) -> "Transform3d": + """ + Match functionality of torch.Tensor.to() + If copy = True or the self Tensor is on a different device, the + returned tensor is a copy of self with the desired torch.device. + If copy = False and the self Tensor already has the correct torch.device, + then self is returned. + + Args: + device: Device (as str or torch.device) for the new tensor. + copy: Boolean indicator whether or not to clone self. Default False. + dtype: If not None, casts the internal tensor variables + to a given torch.dtype. + + Returns: + Transform3d object. + """ + device_ = make_device(device) + dtype_ = self.dtype if dtype is None else dtype + skip_to = self.device == device_ and self.dtype == dtype_ + + if not copy and skip_to: + return self + + other = self.clone() + + if skip_to: + return other + + other.device = device_ + other.dtype = dtype_ + other._matrix = other._matrix.to(device=device_, dtype=dtype_) + other._transforms = [ + t.to(device_, copy=copy, dtype=dtype_) for t in other._transforms + ] + return other + + def cpu(self) -> "Transform3d": + return self.to("cpu") + + def cuda(self) -> "Transform3d": + return self.to("cuda") + +class Translate(Transform3d): + def __init__( + self, + x, + y=None, + z=None, + dtype: torch.dtype = torch.float32, + device: Optional[Device] = None, + ) -> None: + """ + Create a new Transform3d representing 3D translations. + + Option I: Translate(xyz, dtype=torch.float32, device='cpu') + xyz should be a tensor of shape (N, 3) + + Option II: Translate(x, y, z, dtype=torch.float32, device='cpu') + Here x, y, and z will be broadcast against each other and + concatenated to form the translation. 
Each can be: + - A python scalar + - A torch scalar + - A 1D torch tensor + """ + xyz = _handle_input(x, y, z, dtype, device, "Translate") + super().__init__(device=xyz.device, dtype=dtype) + N = xyz.shape[0] + + mat = torch.eye(4, dtype=dtype, device=self.device) + mat = mat.view(1, 4, 4).repeat(N, 1, 1) + mat[:, 3, :3] = xyz + self._matrix = mat + + def _get_matrix_inverse(self) -> torch.Tensor: + """ + Return the inverse of self._matrix. + """ + inv_mask = self._matrix.new_ones([1, 4, 4]) + inv_mask[0, 3, :3] = -1.0 + i_matrix = self._matrix * inv_mask + return i_matrix + +class Rotate(Transform3d): + def __init__( + self, + R: torch.Tensor, + dtype: torch.dtype = torch.float32, + device: Optional[Device] = None, + orthogonal_tol: float = 1e-5, + ) -> None: + """ + Create a new Transform3d representing 3D rotation using a rotation + matrix as the input. + + Args: + R: a tensor of shape (3, 3) or (N, 3, 3) + orthogonal_tol: tolerance for the test of the orthogonality of R + + """ + device_ = get_device(R, device) + super().__init__(device=device_, dtype=dtype) + if R.dim() == 2: + R = R[None] + if R.shape[-2:] != (3, 3): + msg = "R must have shape (3, 3) or (N, 3, 3); got %s" + raise ValueError(msg % repr(R.shape)) + R = R.to(device=device_, dtype=dtype) + _check_valid_rotation_matrix(R, tol=orthogonal_tol) + N = R.shape[0] + mat = torch.eye(4, dtype=dtype, device=device_) + mat = mat.view(1, 4, 4).repeat(N, 1, 1) + mat[:, :3, :3] = R + self._matrix = mat + + def _get_matrix_inverse(self) -> torch.Tensor: + """ + Return the inverse of self._matrix. + """ + return self._matrix.permute(0, 2, 1).contiguous() + +class TensorAccessor(nn.Module): + """ + A helper class to be used with the __getitem__ method. This can be used for + getting/setting the values for an attribute of a class at one particular + index. This is useful when the attributes of a class are batched tensors + and one element in the batch needs to be modified. + """ + + def __init__(self, class_object, index: Union[int, slice]) -> None: + """ + Args: + class_object: this should be an instance of a class which has + attributes which are tensors representing a batch of + values. + index: int/slice, an index indicating the position in the batch. + In __setattr__ and __getattr__ only the value of class + attributes at this index will be accessed. + """ + self.__dict__["class_object"] = class_object + self.__dict__["index"] = index + + def __setattr__(self, name: str, value: Any): + """ + Update the attribute given by `name` to the value given by `value` + at the index specified by `self.index`. + Args: + name: str, name of the attribute. + value: value to set the attribute to. + """ + v = getattr(self.class_object, name) + if not torch.is_tensor(v): + msg = "Can only set values on attributes which are tensors; got %r" + raise AttributeError(msg % type(v)) + + # Convert the attribute to a tensor if it is not a tensor. + if not torch.is_tensor(value): + value = torch.tensor( + value, device=v.device, dtype=v.dtype, requires_grad=v.requires_grad + ) + + # Check the shapes match the existing shape and the shape of the index. 
+ if v.dim() > 1 and value.dim() > 1 and value.shape[1:] != v.shape[1:]: + msg = "Expected value to have shape %r; got %r" + raise ValueError(msg % (v.shape, value.shape)) + if ( + v.dim() == 0 + and isinstance(self.index, slice) + and len(value) != len(self.index) + ): + msg = "Expected value to have len %r; got %r" + raise ValueError(msg % (len(self.index), len(value))) + self.class_object.__dict__[name][self.index] = value + + def __getattr__(self, name: str): + """ + Return the value of the attribute given by "name" on self.class_object + at the index specified in self.index. + Args: + name: string of the attribute name + """ + if hasattr(self.class_object, name): + return self.class_object.__dict__[name][self.index] + else: + msg = "Attribute %s not found on %r" + return AttributeError(msg % (name, self.class_object.__name__)) + +BROADCAST_TYPES = (float, int, list, tuple, torch.Tensor, np.ndarray) + +class TensorProperties(nn.Module): + """ + A mix-in class for storing tensors as properties with helper methods. + """ + + def __init__( + self, + dtype: torch.dtype = torch.float32, + device: Device = "cpu", + **kwargs, + ) -> None: + """ + Args: + dtype: data type to set for the inputs + device: Device (as str or torch.device) + kwargs: any number of keyword arguments. Any arguments which are + of type (float/int/list/tuple/tensor/array) are broadcasted and + other keyword arguments are set as attributes. + """ + super().__init__() + self.device = make_device(device) + self._N = 0 + if kwargs is not None: + + # broadcast all inputs which are float/int/list/tuple/tensor/array + # set as attributes anything else e.g. strings, bools + args_to_broadcast = {} + for k, v in kwargs.items(): + if v is None or isinstance(v, (str, bool)): + setattr(self, k, v) + elif isinstance(v, BROADCAST_TYPES): + args_to_broadcast[k] = v + else: + msg = "Arg %s with type %r is not broadcastable" + warnings.warn(msg % (k, type(v))) + + names = args_to_broadcast.keys() + # convert from type dict.values to tuple + values = tuple(v for v in args_to_broadcast.values()) + + if len(values) > 0: + broadcasted_values = convert_to_tensors_and_broadcast( + *values, device=device + ) + + # Set broadcasted values as attributes on self. + for i, n in enumerate(names): + setattr(self, n, broadcasted_values[i]) + if self._N == 0: + self._N = broadcasted_values[i].shape[0] + + def __len__(self) -> int: + return self._N + + def isempty(self) -> bool: + return self._N == 0 + + def __getitem__(self, index: Union[int, slice]) -> TensorAccessor: + """ + Args: + index: an int or slice used to index all the fields. + Returns: + if `index` is an index int/slice return a TensorAccessor class + with getattribute/setattribute methods which return/update the value + at the index in the original class. + """ + if isinstance(index, (int, slice)): + return TensorAccessor(class_object=self, index=index) + + msg = "Expected index of type int or slice; got %r" + raise ValueError(msg % type(index)) + + # pyre-fixme[14]: `to` overrides method defined in `Module` inconsistently. + def to(self, device: Device = "cpu") -> "TensorProperties": + """ + In place operation to move class properties which are tensors to a + specified device. If self has a property "device", update this as well. 
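+
+ As a rough usage sketch (the property name `verts` is illustrative and a
+ CUDA device is assumed to be available):
+
+ .. code-block:: python
+
+ props = TensorProperties(verts=torch.randn(2, 3))
+ props = props.to("cuda:0") # props.verts is now on cuda:0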
+ """ + device_ = make_device(device) + for k in dir(self): + v = getattr(self, k) + if k == "device": + setattr(self, k, device_) + if torch.is_tensor(v) and v.device != device_: + setattr(self, k, v.to(device_)) + return self + + def cpu(self) -> "TensorProperties": + return self.to("cpu") + + # pyre-fixme[14]: `cuda` overrides method defined in `Module` inconsistently. + def cuda(self, device: Optional[int] = None) -> "TensorProperties": + return self.to(f"cuda:{device}" if device is not None else "cuda") + + def clone(self, other) -> "TensorProperties": + """ + Update the tensor properties of other with the cloned properties of self. + """ + for k in dir(self): + v = getattr(self, k) + if inspect.ismethod(v) or k.startswith("__"): + continue + if torch.is_tensor(v): + v_clone = v.clone() + else: + v_clone = copy.deepcopy(v) + setattr(other, k, v_clone) + return other + + def gather_props(self, batch_idx) -> "TensorProperties": + """ + This is an in place operation to reformat all tensor class attributes + based on a set of given indices using torch.gather. This is useful when + attributes which are batched tensors e.g. shape (N, 3) need to be + multiplied with another tensor which has a different first dimension + e.g. packed vertices of shape (V, 3). + Example + .. code-block:: python + self.specular_color = (N, 3) tensor of specular colors for each mesh + A lighting calculation may use + .. code-block:: python + verts_packed = meshes.verts_packed() # (V, 3) + To multiply these two tensors the batch dimension needs to be the same. + To achieve this we can do + .. code-block:: python + batch_idx = meshes.verts_packed_to_mesh_idx() # (V) + This gives index of the mesh for each vertex in verts_packed. + .. code-block:: python + self.gather_props(batch_idx) + self.specular_color = (V, 3) tensor with the specular color for + each packed vertex. + torch.gather requires the index tensor to have the same shape as the + input tensor so this method takes care of the reshaping of the index + tensor to use with class attributes with arbitrary dimensions. + Args: + batch_idx: shape (B, ...) where `...` represents an arbitrary + number of dimensions + Returns: + self with all properties reshaped. e.g. a property with shape (N, 3) + is transformed to shape (B, 3). + """ + # Iterate through the attributes of the class which are tensors. + for k in dir(self): + v = getattr(self, k) + if torch.is_tensor(v): + if v.shape[0] > 1: + # There are different values for each batch element + # so gather these using the batch_idx. + # First clone the input batch_idx tensor before + # modifying it. + _batch_idx = batch_idx.clone() + idx_dims = _batch_idx.shape + tensor_dims = v.shape + if len(idx_dims) > len(tensor_dims): + msg = "batch_idx cannot have more dimensions than %s. " + msg += "got shape %r and %s has shape %r" + raise ValueError(msg % (k, idx_dims, k, tensor_dims)) + if idx_dims != tensor_dims: + # To use torch.gather the index tensor (_batch_idx) has + # to have the same shape as the input tensor. + new_dims = len(tensor_dims) - len(idx_dims) + new_shape = idx_dims + (1,) * new_dims + expand_dims = (-1,) + tensor_dims[1:] + _batch_idx = _batch_idx.view(*new_shape) + _batch_idx = _batch_idx.expand(*expand_dims) + + v = v.gather(0, _batch_idx) + setattr(self, k, v) + return self + +class CamerasBase(TensorProperties): + """ + `CamerasBase` implements a base class for all cameras. 
+ For cameras, there are four different coordinate systems (or spaces) + - World coordinate system: This is the system the object lives - the world. + - Camera view coordinate system: This is the system that has its origin on the camera + and the and the Z-axis perpendicular to the image plane. + In PyTorch3D, we assume that +X points left, and +Y points up and + +Z points out from the image plane. + The transformation from world --> view happens after applying a rotation (R) + and translation (T) + - NDC coordinate system: This is the normalized coordinate system that confines + in a volume the rendered part of the object or scene. Also known as view volume. + For square images, given the PyTorch3D convention, (+1, +1, znear) + is the top left near corner, and (-1, -1, zfar) is the bottom right far + corner of the volume. + The transformation from view --> NDC happens after applying the camera + projection matrix (P) if defined in NDC space. + For non square images, we scale the points such that smallest side + has range [-1, 1] and the largest side has range [-u, u], with u > 1. + - Screen coordinate system: This is another representation of the view volume with + the XY coordinates defined in image space instead of a normalized space. + A better illustration of the coordinate systems can be found in + pytorch3d/docs/notes/cameras.md. + It defines methods that are common to all camera models: + - `get_camera_center` that returns the optical center of the camera in + world coordinates + - `get_world_to_view_transform` which returns a 3D transform from + world coordinates to the camera view coordinates (R, T) + - `get_full_projection_transform` which composes the projection + transform (P) with the world-to-view transform (R, T) + - `transform_points` which takes a set of input points in world coordinates and + projects to the space the camera is defined in (NDC or screen) + - `get_ndc_camera_transform` which defines the transform from screen/NDC to + PyTorch3D's NDC space + - `transform_points_ndc` which takes a set of points in world coordinates and + projects them to PyTorch3D's NDC space + - `transform_points_screen` which takes a set of points in world coordinates and + projects them to screen space + For each new camera, one should implement the `get_projection_transform` + routine that returns the mapping from camera view coordinates to camera + coordinates (NDC or screen). + Another useful function that is specific to each camera model is + `unproject_points` which sends points from camera coordinates (NDC or screen) + back to camera view or world coordinates depending on the `world_coordinates` + boolean argument of the function. + """ + + # Used in __getitem__ to index the relevant fields + # When creating a new camera, this should be set in the __init__ + _FIELDS: Tuple[str, ...] = () + + # Names of fields which are a constant property of the whole batch, rather + # than themselves a batch of data. + # When joining objects into a batch, they will have to agree. + _SHARED_FIELDS: Tuple[str, ...] = () + + def get_projection_transform(self): + """ + Calculate the projective transformation matrix. + Args: + **kwargs: parameters for the projection can be passed in as keyword + arguments to override the default values set in `__init__`. 
+ Return: + a `Transform3d` object which represents a batch of projection + matrices of shape (N, 3, 3) + """ + raise NotImplementedError() + + def unproject_points(self, xy_depth: torch.Tensor, **kwargs): + """ + Transform input points from camera coodinates (NDC or screen) + to the world / camera coordinates. + Each of the input points `xy_depth` of shape (..., 3) is + a concatenation of the x, y location and its depth. + For instance, for an input 2D tensor of shape `(num_points, 3)` + `xy_depth` takes the following form: + `xy_depth[i] = [x[i], y[i], depth[i]]`, + for a each point at an index `i`. + The following example demonstrates the relationship between + `transform_points` and `unproject_points`: + .. code-block:: python + cameras = # camera object derived from CamerasBase + xyz = # 3D points of shape (batch_size, num_points, 3) + # transform xyz to the camera view coordinates + xyz_cam = cameras.get_world_to_view_transform().transform_points(xyz) + # extract the depth of each point as the 3rd coord of xyz_cam + depth = xyz_cam[:, :, 2:] + # project the points xyz to the camera + xy = cameras.transform_points(xyz)[:, :, :2] + # append depth to xy + xy_depth = torch.cat((xy, depth), dim=2) + # unproject to the world coordinates + xyz_unproj_world = cameras.unproject_points(xy_depth, world_coordinates=True) + print(torch.allclose(xyz, xyz_unproj_world)) # True + # unproject to the camera coordinates + xyz_unproj = cameras.unproject_points(xy_depth, world_coordinates=False) + print(torch.allclose(xyz_cam, xyz_unproj)) # True + Args: + xy_depth: torch tensor of shape (..., 3). + world_coordinates: If `True`, unprojects the points back to world + coordinates using the camera extrinsics `R` and `T`. + `False` ignores `R` and `T` and unprojects to + the camera view coordinates. + from_ndc: If `False` (default), assumes xy part of input is in + NDC space if self.in_ndc(), otherwise in screen space. If + `True`, assumes xy is in NDC space even if the camera + is defined in screen space. + Returns + new_points: unprojected points with the same shape as `xy_depth`. + """ + raise NotImplementedError() + + def get_camera_center(self, **kwargs) -> torch.Tensor: + """ + Return the 3D location of the camera optical center + in the world coordinates. + Args: + **kwargs: parameters for the camera extrinsics can be passed in + as keyword arguments to override the default values + set in __init__. + Setting T here will update the values set in init as this + value may be needed later on in the rendering pipeline e.g. for + lighting calculations. + Returns: + C: a batch of 3D locations of shape (N, 3) denoting + the locations of the center of each camera in the batch. + """ + w2v_trans = self.get_world_to_view_transform(**kwargs) + P = w2v_trans.inverse().get_matrix() + # the camera center is the translation component (the first 3 elements + # of the last row) of the inverted world-to-view + # transform (4x4 RT matrix) + C = P[:, 3, :3] + return C + + def get_world_to_view_transform(self, **kwargs) -> Transform3d: + """ + Return the world-to-view transform. + Args: + **kwargs: parameters for the camera extrinsics can be passed in + as keyword arguments to override the default values + set in __init__. + Setting R and T here will update the values set in init as these + values may be needed later on in the rendering pipeline e.g. for + lighting calculations. 
+ Returns: + A Transform3d object which represents a batch of transforms + of shape (N, 3, 3) + """ + R: torch.Tensor = kwargs.get("R", self.R) + T: torch.Tensor = kwargs.get("T", self.T) + self.R = R # pyre-ignore[16] + self.T = T # pyre-ignore[16] + world_to_view_transform = get_world_to_view_transform(R=R, T=T) + return world_to_view_transform + + def get_full_projection_transform(self, **kwargs) -> Transform3d: + """ + Return the full world-to-camera transform composing the + world-to-view and view-to-camera transforms. + If camera is defined in NDC space, the projected points are in NDC space. + If camera is defined in screen space, the projected points are in screen space. + Args: + **kwargs: parameters for the projection transforms can be passed in + as keyword arguments to override the default values + set in __init__. + Setting R and T here will update the values set in init as these + values may be needed later on in the rendering pipeline e.g. for + lighting calculations. + Returns: + a Transform3d object which represents a batch of transforms + of shape (N, 3, 3) + """ + self.R: torch.Tensor = kwargs.get("R", self.R) # pyre-ignore[16] + self.T: torch.Tensor = kwargs.get("T", self.T) # pyre-ignore[16] + world_to_view_transform = self.get_world_to_view_transform(R=self.R, T=self.T) + view_to_proj_transform = self.get_projection_transform(**kwargs) + return world_to_view_transform.compose(view_to_proj_transform) + + def transform_points( + self, points, eps: Optional[float] = None, **kwargs + ) -> torch.Tensor: + """ + Transform input points from world to camera space with the + projection matrix defined by the camera. + For `CamerasBase.transform_points`, setting `eps > 0` + stabilizes gradients since it leads to avoiding division + by excessively low numbers for points close to the camera plane. + Args: + points: torch tensor of shape (..., 3). + eps: If eps!=None, the argument is used to clamp the + divisor in the homogeneous normalization of the points + transformed to the ndc space. Please see + `transforms.Transform3d.transform_points` for details. + For `CamerasBase.transform_points`, setting `eps > 0` + stabilizes gradients since it leads to avoiding division + by excessively low numbers for points close to the + camera plane. + Returns + new_points: transformed points with the same shape as the input. + """ + world_to_proj_transform = self.get_full_projection_transform(**kwargs) + return world_to_proj_transform.transform_points(points, eps=eps) + + def get_ndc_camera_transform(self, **kwargs) -> Transform3d: + """ + Returns the transform from camera projection space (screen or NDC) to NDC space. + For cameras that can be specified in screen space, this transform + allows points to be converted from screen to NDC space. + The default transform scales the points from [0, W]x[0, H] + to [-1, 1]x[-u, u] or [-u, u]x[-1, 1] where u > 1 is the aspect ratio of the image. + This function should be modified per camera definitions if need be, + e.g. for Perspective/Orthographic cameras we provide a custom implementation. + This transform assumes PyTorch3D coordinate system conventions for + both the NDC space and the input points. + This transform interfaces with the PyTorch3D renderer which assumes + input points to the renderer to be in NDC space. 
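+
+ As a rough sketch of the expected behaviour (`FoVPerspectiveCameras`
+ below is used only as an example of an NDC-space camera):
+
+ .. code-block:: python
+
+ cameras = FoVPerspectiveCameras()
+ ndc_transform = cameras.get_ndc_camera_transform()
+ # identity for NDC-space cameras; a screen -> NDC rescaling otherwise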
+ """ + if self.in_ndc(): + return Transform3d(device=self.device, dtype=torch.float32) + else: + # For custom cameras which can be defined in screen space, + # users might might have to implement the screen to NDC transform based + # on the definition of the camera parameters. + # See PerspectiveCameras/OrthographicCameras for an example. + # We don't flip xy because we assume that world points are in + # PyTorch3D coordinates, and thus conversion from screen to ndc + # is a mere scaling from image to [-1, 1] scale. + image_size = kwargs.get("image_size", self.get_image_size()) + return get_screen_to_ndc_transform( + self, with_xyflip=False, image_size=image_size + ) + + def transform_points_ndc( + self, points, eps: Optional[float] = None, **kwargs + ) -> torch.Tensor: + """ + Transforms points from PyTorch3D world/camera space to NDC space. + Input points follow the PyTorch3D coordinate system conventions: +X left, +Y up. + Output points are in NDC space: +X left, +Y up, origin at image center. + Args: + points: torch tensor of shape (..., 3). + eps: If eps!=None, the argument is used to clamp the + divisor in the homogeneous normalization of the points + transformed to the ndc space. Please see + `transforms.Transform3d.transform_points` for details. + For `CamerasBase.transform_points`, setting `eps > 0` + stabilizes gradients since it leads to avoiding division + by excessively low numbers for points close to the + camera plane. + Returns + new_points: transformed points with the same shape as the input. + """ + world_to_ndc_transform = self.get_full_projection_transform(**kwargs) + if not self.in_ndc(): + to_ndc_transform = self.get_ndc_camera_transform(**kwargs) + world_to_ndc_transform = world_to_ndc_transform.compose(to_ndc_transform) + + return world_to_ndc_transform.transform_points(points, eps=eps) + + def transform_points_screen( + self, points, eps: Optional[float] = None, **kwargs + ) -> torch.Tensor: + """ + Transforms points from PyTorch3D world/camera space to screen space. + Input points follow the PyTorch3D coordinate system conventions: +X left, +Y up. + Output points are in screen space: +X right, +Y down, origin at top left corner. + Args: + points: torch tensor of shape (..., 3). + eps: If eps!=None, the argument is used to clamp the + divisor in the homogeneous normalization of the points + transformed to the ndc space. Please see + `transforms.Transform3d.transform_points` for details. + For `CamerasBase.transform_points`, setting `eps > 0` + stabilizes gradients since it leads to avoiding division + by excessively low numbers for points close to the + camera plane. + Returns + new_points: transformed points with the same shape as the input. + """ + points_ndc = self.transform_points_ndc(points, eps=eps, **kwargs) + image_size = kwargs.get("image_size", self.get_image_size()) + return get_ndc_to_screen_transform( + self, with_xyflip=True, image_size=image_size + ).transform_points(points_ndc, eps=eps) + + def clone(self): + """ + Returns a copy of `self`. 
+ """ + cam_type = type(self) + other = cam_type(device=self.device) + return super().clone(other) + + def is_perspective(self): + raise NotImplementedError() + + def in_ndc(self): + """ + Specifies whether the camera is defined in NDC space + or in screen (image) space + """ + raise NotImplementedError() + + def get_znear(self): + return self.znear if hasattr(self, "znear") else None + + def get_image_size(self): + """ + Returns the image size, if provided, expected in the form of (height, width) + The image size is used for conversion of projected points to screen coordinates. + """ + return self.image_size if hasattr(self, "image_size") else None + + def __getitem__( + self, index: Union[int, List[int], torch.LongTensor] + ) -> "CamerasBase": + """ + Override for the __getitem__ method in TensorProperties which needs to be + refactored. + Args: + index: an int/list/long tensor used to index all the fields in the cameras given by + self._FIELDS. + Returns: + if `index` is an index int/list/long tensor return an instance of the current + cameras class with only the values at the selected index. + """ + + kwargs = {} + + if not isinstance(index, (int, list, torch.LongTensor, torch.cuda.LongTensor)): + msg = "Invalid index type, expected int, List[int] or torch.LongTensor; got %r" + raise ValueError(msg % type(index)) + + if isinstance(index, int): + index = [index] + + if max(index) >= len(self): + raise ValueError(f"Index {max(index)} is out of bounds for select cameras") + + for field in self._FIELDS: + val = getattr(self, field, None) + if val is None: + continue + + # e.g. "in_ndc" is set as attribute "_in_ndc" on the class + # but provided as "in_ndc" on initialization + if field.startswith("_"): + field = field[1:] + + if isinstance(val, (str, bool)): + kwargs[field] = val + elif isinstance(val, torch.Tensor): + # In the init, all inputs will be converted to + # tensors before setting as attributes + kwargs[field] = val[index] + else: + raise ValueError(f"Field {field} type is not supported for indexing") + + kwargs["device"] = self.device + return self.__class__(**kwargs) + +class FoVPerspectiveCameras(CamerasBase): + """ + A class which stores a batch of parameters to generate a batch of + projection matrices by specifying the field of view. + The definition of the parameters follow the OpenGL perspective camera. + + The extrinsics of the camera (R and T matrices) can also be set in the + initializer or passed in to `get_full_projection_transform` to get + the full transformation from world -> ndc. + + The `transform_points` method calculates the full world -> ndc transform + and then applies it to the input points. + + The transforms can also be returned separately as Transform3d objects. + + * Setting the Aspect Ratio for Non Square Images * + + If the desired output image size is non square (i.e. a tuple of (H, W) where H != W) + the aspect ratio needs special consideration: There are two aspect ratios + to be aware of: + - the aspect ratio of each pixel + - the aspect ratio of the output image + The `aspect_ratio` setting in the FoVPerspectiveCameras sets the + pixel aspect ratio. When using this camera with the differentiable rasterizer + be aware that in the rasterizer we assume square pixels, but allow + variable image aspect ratio (i.e rectangle images). + + In most cases you will want to set the camera `aspect_ratio=1.0` + (i.e. square pixels) and only vary the output image dimensions in pixels + for rasterization. 
+ """ + + # For __getitem__ + _FIELDS = ( + "K", + "znear", + "zfar", + "aspect_ratio", + "fov", + "R", + "T", + "degrees", + ) + + _SHARED_FIELDS = ("degrees",) + + def __init__( + self, + znear=1.0, + zfar=100.0, + aspect_ratio=1.0, + fov=60.0, + degrees: bool = True, + R: torch.Tensor = _R, + T: torch.Tensor = _T, + K: Optional[torch.Tensor] = None, + device: Device = "cpu", + ) -> None: + """ + + Args: + znear: near clipping plane of the view frustrum. + zfar: far clipping plane of the view frustrum. + aspect_ratio: aspect ratio of the image pixels. + 1.0 indicates square pixels. + fov: field of view angle of the camera. + degrees: bool, set to True if fov is specified in degrees. + R: Rotation matrix of shape (N, 3, 3) + T: Translation matrix of shape (N, 3) + K: (optional) A calibration matrix of shape (N, 4, 4) + If provided, don't need znear, zfar, fov, aspect_ratio, degrees + device: Device (as str or torch.device) + """ + # The initializer formats all inputs to torch tensors and broadcasts + # all the inputs to have the same batch dimension where necessary. + super().__init__( + device=device, + znear=znear, + zfar=zfar, + aspect_ratio=aspect_ratio, + fov=fov, + R=R, + T=T, + K=K, + ) + + # No need to convert to tensor or broadcast. + self.degrees = degrees + + def compute_projection_matrix( + self, znear, zfar, fov, aspect_ratio, degrees: bool + ) -> torch.Tensor: + """ + Compute the calibration matrix K of shape (N, 4, 4) + + Args: + znear: near clipping plane of the view frustrum. + zfar: far clipping plane of the view frustrum. + fov: field of view angle of the camera. + aspect_ratio: aspect ratio of the image pixels. + 1.0 indicates square pixels. + degrees: bool, set to True if fov is specified in degrees. + + Returns: + torch.FloatTensor of the calibration matrix with shape (N, 4, 4) + """ + K = torch.zeros((self._N, 4, 4), device=self.device, dtype=torch.float32) + ones = torch.ones((self._N), dtype=torch.float32, device=self.device) + if degrees: + fov = (np.pi / 180) * fov + + if not torch.is_tensor(fov): + fov = torch.tensor(fov, device=self.device) + tanHalfFov = torch.tan((fov / 2)) + max_y = tanHalfFov * znear + min_y = -max_y + max_x = max_y * aspect_ratio + min_x = -max_x + + # NOTE: In OpenGL the projection matrix changes the handedness of the + # coordinate frame. i.e the NDC space positive z direction is the + # camera space negative z direction. This is because the sign of the z + # in the projection matrix is set to -1.0. + # In pytorch3d we maintain a right handed coordinate system throughout + # so the so the z sign is 1.0. + z_sign = 1.0 + + K[:, 0, 0] = 2.0 * znear / (max_x - min_x) + K[:, 1, 1] = 2.0 * znear / (max_y - min_y) + K[:, 0, 2] = (max_x + min_x) / (max_x - min_x) + K[:, 1, 2] = (max_y + min_y) / (max_y - min_y) + K[:, 3, 2] = z_sign * ones + + # NOTE: This maps the z coordinate from [0, 1] where z = 0 if the point + # is at the near clipping plane and z = 1 when the point is at the far + # clipping plane. + K[:, 2, 2] = z_sign * zfar / (zfar - znear) + K[:, 2, 3] = -(zfar * znear) / (zfar - znear) + + return K + + def get_projection_transform(self, **kwargs) -> Transform3d: + """ + Calculate the perspective projection matrix with a symmetric + viewing frustrum. Use column major order. + The viewing frustrum will be projected into ndc, s.t. + (max_x, max_y) -> (+1, +1) + (min_x, min_y) -> (-1, -1) + + Args: + **kwargs: parameters for the projection can be passed in as keyword + arguments to override the default values set in `__init__`. 
+ + Return: + a Transform3d object which represents a batch of projection + matrices of shape (N, 4, 4) + + .. code-block:: python + + h1 = (max_y + min_y)/(max_y - min_y) + w1 = (max_x + min_x)/(max_x - min_x) + tanhalffov = tan((fov/2)) + s1 = 1/tanhalffov + s2 = 1/(tanhalffov * (aspect_ratio)) + + # To map z to the range [0, 1] use: + f1 = far / (far - near) + f2 = -(far * near) / (far - near) + + # Projection matrix + K = [ + [s1, 0, w1, 0], + [0, s2, h1, 0], + [0, 0, f1, f2], + [0, 0, 1, 0], + ] + """ + K = kwargs.get("K", self.K) + if K is not None: + if K.shape != (self._N, 4, 4): + msg = "Expected K to have shape of (%r, 4, 4)" + raise ValueError(msg % (self._N)) + else: + K = self.compute_projection_matrix( + kwargs.get("znear", self.znear), + kwargs.get("zfar", self.zfar), + kwargs.get("fov", self.fov), + kwargs.get("aspect_ratio", self.aspect_ratio), + kwargs.get("degrees", self.degrees), + ) + + # Transpose the projection matrix as PyTorch3D transforms use row vectors. + transform = Transform3d( + matrix=K.transpose(1, 2).contiguous(), device=self.device + ) + return transform + + def unproject_points( + self, + xy_depth: torch.Tensor, + world_coordinates: bool = True, + scaled_depth_input: bool = False, + **kwargs, + ) -> torch.Tensor: + """>! + FoV cameras further allow for passing depth in world units + (`scaled_depth_input=False`) or in the [0, 1]-normalized units + (`scaled_depth_input=True`) + + Args: + scaled_depth_input: If `True`, assumes the input depth is in + the [0, 1]-normalized units. If `False` the input depth is in + the world units. + """ + + # obtain the relevant transformation to ndc + if world_coordinates: + to_ndc_transform = self.get_full_projection_transform() + else: + to_ndc_transform = self.get_projection_transform() + + if scaled_depth_input: + # the input is scaled depth, so we don't have to do anything + xy_sdepth = xy_depth + else: + # parse out important values from the projection matrix + K_matrix = self.get_projection_transform(**kwargs.copy()).get_matrix() + # parse out f1, f2 from K_matrix + unsqueeze_shape = [1] * xy_depth.dim() + unsqueeze_shape[0] = K_matrix.shape[0] + f1 = K_matrix[:, 2, 2].reshape(unsqueeze_shape) + f2 = K_matrix[:, 3, 2].reshape(unsqueeze_shape) + # get the scaled depth + sdepth = (f1 * xy_depth[..., 2:3] + f2) / xy_depth[..., 2:3] + # concatenate xy + scaled depth + xy_sdepth = torch.cat((xy_depth[..., 0:2], sdepth), dim=-1) + + # unproject with inverse of the projection + unprojection_transform = to_ndc_transform.inverse() + return unprojection_transform.transform_points(xy_sdepth) + + def is_perspective(self): + return True + + def in_ndc(self): + return True + +####################################################################################### +## ██████╗ ███████╗███████╗██╗███╗ ██╗██╗████████╗██╗ ██████╗ ███╗ ██╗███████╗ ## +## ██╔══██╗██╔════╝██╔════╝██║████╗ ██║██║╚══██╔══╝██║██╔═══██╗████╗ ██║██╔════╝ ## +## ██║ ██║█████╗ █████╗ ██║██╔██╗ ██║██║ ██║ ██║██║ ██║██╔██╗ ██║███████╗ ## +## ██║ ██║██╔══╝ ██╔══╝ ██║██║╚██╗██║██║ ██║ ██║██║ ██║██║╚██╗██║╚════██║ ## +## ██████╔╝███████╗██║ ██║██║ ╚████║██║ ██║ ██║╚██████╔╝██║ ╚████║███████║ ## +## ╚═════╝ ╚══════╝╚═╝ ╚═╝╚═╝ ╚═══╝╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═══╝╚══════╝ ## +####################################################################################### + +def make_device(device: Device) -> torch.device: + """ + Makes an actual torch.device object from the device specified as + either a string or torch.device object. 
If the device is `cuda` without + a specific index, the index of the current device is assigned. + Args: + device: Device (as str or torch.device) + Returns: + A matching torch.device object + """ + device = torch.device(device) if isinstance(device, str) else device + if device.type == "cuda" and device.index is None: # pyre-ignore[16] + # If cuda but with no index, then the current cuda device is indicated. + # In that case, we fix to that device + device = torch.device(f"cuda:{torch.cuda.current_device()}") + return device + +def get_device(x, device: Optional[Device] = None) -> torch.device: + """ + Gets the device of the specified variable x if it is a tensor, or + falls back to a default CPU device otherwise. Allows overriding by + providing an explicit device. + Args: + x: a torch.Tensor to get the device from or another type + device: Device (as str or torch.device) to fall back to + Returns: + A matching torch.device object + """ + + # User overrides device + if device is not None: + return make_device(device) + + # Set device based on input tensor + if torch.is_tensor(x): + return x.device + + # Default device is cpu + return torch.device("cpu") + +def _axis_angle_rotation(axis: str, angle: torch.Tensor) -> torch.Tensor: + """ + Return the rotation matrices for one of the rotations about an axis + of which Euler angles describe, for each value of the angle given. + + Args: + axis: Axis label "X" or "Y or "Z". + angle: any shape tensor of Euler angles in radians + + Returns: + Rotation matrices as tensor of shape (..., 3, 3). + """ + + cos = torch.cos(angle) + sin = torch.sin(angle) + one = torch.ones_like(angle) + zero = torch.zeros_like(angle) + + if axis == "X": + R_flat = (one, zero, zero, zero, cos, -sin, zero, sin, cos) + elif axis == "Y": + R_flat = (cos, zero, sin, zero, one, zero, -sin, zero, cos) + elif axis == "Z": + R_flat = (cos, -sin, zero, sin, cos, zero, zero, zero, one) + else: + raise ValueError("letter must be either X, Y or Z.") + + return torch.stack(R_flat, -1).reshape(angle.shape + (3, 3)) + +def euler_angles_to_matrix(euler_angles: torch.Tensor, convention: str) -> torch.Tensor: + """ + Convert rotations given as Euler angles in radians to rotation matrices. + + Args: + euler_angles: Euler angles in radians as tensor of shape (..., 3). + convention: Convention string of three uppercase letters from + {"X", "Y", and "Z"}. + + Returns: + Rotation matrices as tensor of shape (..., 3, 3). + """ + if euler_angles.dim() == 0 or euler_angles.shape[-1] != 3: + raise ValueError("Invalid input euler angles.") + if len(convention) != 3: + raise ValueError("Convention must have 3 letters.") + if convention[1] in (convention[0], convention[2]): + raise ValueError(f"Invalid convention {convention}.") + for letter in convention: + if letter not in ("X", "Y", "Z"): + raise ValueError(f"Invalid letter {letter} in convention string.") + matrices = [ + _axis_angle_rotation(c, e) + for c, e in zip(convention, torch.unbind(euler_angles, -1)) + ] + # return functools.reduce(torch.matmul, matrices) + return torch.matmul(torch.matmul(matrices[0], matrices[1]), matrices[2]) + +def _broadcast_bmm(a, b) -> torch.Tensor: + """ + Batch multiply two matrices and broadcast if necessary. + + Args: + a: torch tensor of shape (P, K) or (M, P, K) + b: torch tensor of shape (N, K, K) + + Returns: + a and b broadcast multiplied. The output batch dimension is max(N, M). + + To broadcast transforms across a batch dimension if M != N then + expect that either M = 1 or N = 1. 
The tensor with batch dimension 1 is + expanded to have shape N or M. + """ + if a.dim() == 2: + a = a[None] + if len(a) != len(b): + if not ((len(a) == 1) or (len(b) == 1)): + msg = "Expected batch dim for bmm to be equal or 1; got %r, %r" + raise ValueError(msg % (a.shape, b.shape)) + if len(a) == 1: + a = a.expand(len(b), -1, -1) + if len(b) == 1: + b = b.expand(len(a), -1, -1) + return a.bmm(b) + +def _safe_det_3x3(t: torch.Tensor): + """ + Fast determinant calculation for a batch of 3x3 matrices. + Note, result of this function might not be the same as `torch.det()`. + The differences might be in the last significant digit. + Args: + t: Tensor of shape (N, 3, 3). + Returns: + Tensor of shape (N) with determinants. + """ + + det = ( + t[..., 0, 0] * (t[..., 1, 1] * t[..., 2, 2] - t[..., 1, 2] * t[..., 2, 1]) + - t[..., 0, 1] * (t[..., 1, 0] * t[..., 2, 2] - t[..., 2, 0] * t[..., 1, 2]) + + t[..., 0, 2] * (t[..., 1, 0] * t[..., 2, 1] - t[..., 2, 0] * t[..., 1, 1]) + ) + + return det + +def get_world_to_view_transform( + R: torch.Tensor = _R, T: torch.Tensor = _T +) -> Transform3d: + """ + This function returns a Transform3d representing the transformation + matrix to go from world space to view space by applying a rotation and + a translation. + PyTorch3D uses the same convention as Hartley & Zisserman. + I.e., for camera extrinsic parameters R (rotation) and T (translation), + we map a 3D point `X_world` in world coordinates to + a point `X_cam` in camera coordinates with: + `X_cam = X_world R + T` + Args: + R: (N, 3, 3) matrix representing the rotation. + T: (N, 3) matrix representing the translation. + Returns: + a Transform3d object which represents the composed RT transformation. + """ + # TODO: also support the case where RT is specified as one matrix + # of shape (N, 4, 4). + + if T.shape[0] != R.shape[0]: + msg = "Expected R, T to have the same batch dimension; got %r, %r" + raise ValueError(msg % (R.shape[0], T.shape[0])) + if T.dim() != 2 or T.shape[1:] != (3,): + msg = "Expected T to have shape (N, 3); got %r" + raise ValueError(msg % repr(T.shape)) + if R.dim() != 3 or R.shape[1:] != (3, 3): + msg = "Expected R to have shape (N, 3, 3); got %r" + raise ValueError(msg % repr(R.shape)) + + # Create a Transform3d object + T_ = Translate(T, device=T.device) + R_ = Rotate(R, device=R.device) + return R_.compose(T_) + +def _check_valid_rotation_matrix(R, tol: float = 1e-7) -> None: + """ + Determine if R is a valid rotation matrix by checking it satisfies the + following conditions: + + ``RR^T = I and det(R) = 1`` + + Args: + R: an (N, 3, 3) matrix + + Returns: + None + + Emits a warning if R is an invalid rotation matrix. + """ + N = R.shape[0] + eye = torch.eye(3, dtype=R.dtype, device=R.device) + eye = eye.view(1, 3, 3).expand(N, -1, -1) + orthogonal = torch.allclose(R.bmm(R.transpose(1, 2)), eye, atol=tol) + det_R = _safe_det_3x3(R) + no_distortion = torch.allclose(det_R, torch.ones_like(det_R)) + if not (orthogonal and no_distortion): + msg = "R is not a valid rotation matrix" + warnings.warn(msg) + return + +def format_tensor( + input, + dtype: torch.dtype = torch.float32, + device: Device = "cpu", +) -> torch.Tensor: + """ + Helper function for converting a scalar value to a tensor. + Args: + input: Python scalar, Python list/tuple, torch scalar, 1D torch tensor + dtype: data type for the input + device: Device (as str or torch.device) on which the tensor should be placed. + Returns: + input_vec: torch tensor with optional added batch dimension. 
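+
+ A small sketch of the expected behaviour (the values are illustrative):
+
+ .. code-block:: python
+
+ format_tensor(2.5) # tensor([2.5000])
+ format_tensor([1.0, 2.0, 3.0]) # tensor([1., 2., 3.])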
+ """ + device_ = make_device(device) + if not torch.is_tensor(input): + input = torch.tensor(input, dtype=dtype, device=device_) + elif not input.device.type.startswith('mps'): + input = torch.tensor(input, dtype=torch.float32,device=device_) + + if input.dim() == 0: + input = input.view(1) + + if input.device == device_: + return input + + input = input.to(device=device) + return input + +def convert_to_tensors_and_broadcast( + *args, + dtype: torch.dtype = torch.float32, + device: Device = "cpu", +): + """ + Helper function to handle parsing an arbitrary number of inputs (*args) + which all need to have the same batch dimension. + The output is a list of tensors. + Args: + *args: an arbitrary number of inputs + Each of the values in `args` can be one of the following + - Python scalar + - Torch scalar + - Torch tensor of shape (N, K_i) or (1, K_i) where K_i are + an arbitrary number of dimensions which can vary for each + value in args. In this case each input is broadcast to a + tensor of shape (N, K_i) + dtype: data type to use when creating new tensors. + device: torch device on which the tensors should be placed. + Output: + args: A list of tensors of shape (N, K_i) + """ + # Convert all inputs to tensors with a batch dimension + args_1d = [format_tensor(c, dtype, device) for c in args] + + # Find broadcast size + sizes = [c.shape[0] for c in args_1d] + N = max(sizes) + + args_Nd = [] + for c in args_1d: + if c.shape[0] != 1 and c.shape[0] != N: + msg = "Got non-broadcastable sizes %r" % sizes + raise ValueError(msg) + + # Expand broadcast dim and keep non broadcast dims the same size + expand_sizes = (N,) + (-1,) * len(c.shape[1:]) + args_Nd.append(c.expand(*expand_sizes)) + + return args_Nd + +def _handle_coord(c, dtype: torch.dtype, device: torch.device) -> torch.Tensor: + """ + Helper function for _handle_input. + + Args: + c: Python scalar, torch scalar, or 1D torch tensor + + Returns: + c_vec: 1D torch tensor + """ + if not torch.is_tensor(c): + c = torch.tensor(c, dtype=dtype, device=device) + if c.dim() == 0: + c = c.view(1) + if c.device != device or c.dtype != dtype: + c = c.to(device=device, dtype=dtype) + return c + +def _handle_input( + x, + y, + z, + dtype: torch.dtype, + device: Optional[Device], + name: str, + allow_singleton: bool = False, +) -> torch.Tensor: + """ + Helper function to handle parsing logic for building transforms. The output + is always a tensor of shape (N, 3), but there are several types of allowed + input. + + Case I: Single Matrix + In this case x is a tensor of shape (N, 3), and y and z are None. Here just + return x. 
+ + Case II: Vectors and Scalars + In this case each of x, y, and z can be one of the following + - Python scalar + - Torch scalar + - Torch tensor of shape (N, 1) or (1, 1) + In this case x, y and z are broadcast to tensors of shape (N, 1) + and concatenated to a tensor of shape (N, 3) + + Case III: Singleton (only if allow_singleton=True) + In this case y and z are None, and x can be one of the following: + - Python scalar + - Torch scalar + - Torch tensor of shape (N, 1) or (1, 1) + Here x will be duplicated 3 times, and we return a tensor of shape (N, 3) + + Returns: + xyz: Tensor of shape (N, 3) + """ + device_ = get_device(x, device) + # If x is actually a tensor of shape (N, 3) then just return it + if torch.is_tensor(x) and x.dim() == 2: + if x.shape[1] != 3: + msg = "Expected tensor of shape (N, 3); got %r (in %s)" + raise ValueError(msg % (x.shape, name)) + if y is not None or z is not None: + msg = "Expected y and z to be None (in %s)" % name + raise ValueError(msg) + return x.to(device=device_, dtype=dtype) + + if allow_singleton and y is None and z is None: + y = x + z = x + + # Convert all to 1D tensors + xyz = [_handle_coord(c, dtype, device_) for c in [x, y, z]] + + # Broadcast and concatenate + sizes = [c.shape[0] for c in xyz] + N = max(sizes) + for c in xyz: + if c.shape[0] != 1 and c.shape[0] != N: + msg = "Got non-broadcastable sizes %r (in %s)" % (sizes, name) + raise ValueError(msg) + xyz = [c.expand(N) for c in xyz] + xyz = torch.stack(xyz, dim=1) + return xyz diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/rife/inference_video.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/rife/inference_video.py new file mode 100644 index 0000000000000000000000000000000000000000..6300c7d6a4c6b4696b40c7a9aad092aa6abb9a16 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/rife/inference_video.py @@ -0,0 +1,281 @@ +# thanks to https://github.com/n00mkrad for the inspiration and a bit of code. 
Also thanks for https://github.com/XmYx for the initial reorganization of this script +import os +from types import SimpleNamespace +import cv2 +import torch +import shutil +import numpy as np +from tqdm import tqdm +from torch.nn import functional as F +import warnings +import _thread +from queue import Queue +import time +from .model.pytorch_msssim import ssim_matlab + +from deforum_helpers.video_audio_utilities import ffmpeg_stitch_video +from deforum_helpers.general_utils import duplicate_pngs_from_folder + +warnings.filterwarnings("ignore") + +def run_rife_new_video_infer( + output=None, + model=None, + fp16=False, + UHD=False, # *Will be received as *True* if imgs/vid resolution is 2K or higher* + scale=1.0, + fps=None, + deforum_models_path=None, + raw_output_imgs_path=None, + img_batch_id=None, + ffmpeg_location=None, + audio_track=None, + interp_x_amount=2, + slow_mo_enabled=False, + slow_mo_x_amount=2, + ffmpeg_crf=17, + ffmpeg_preset='veryslow', + keep_imgs=False, + orig_vid_name = None, + srt_path = None): + + args = SimpleNamespace() + args.output = output + args.modelDir = model + args.fp16 = fp16 + args.UHD = UHD + args.scale = scale + args.fps = fps + args.deforum_models_path = deforum_models_path + args.raw_output_imgs_path = raw_output_imgs_path + args.img_batch_id = img_batch_id + args.ffmpeg_location = ffmpeg_location + args.audio_track = audio_track + args.interp_x_amount = interp_x_amount + args.slow_mo_enabled = slow_mo_enabled + args.slow_mo_x_amount = slow_mo_x_amount + args.ffmpeg_crf = ffmpeg_crf + args.ffmpeg_preset = ffmpeg_preset + args.keep_imgs = keep_imgs + args.orig_vid_name = orig_vid_name + + if args.UHD and args.scale == 1.0: + args.scale = 0.5 + + start_time = time.time() + + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + torch.set_grad_enabled(False) + if torch.cuda.is_available(): + torch.backends.cudnn.enabled = True + torch.backends.cudnn.benchmark = True + # TODO: Can/ need to handle this? currently it's always False and give errors if True but faster speeds on tensortcore equipped gpus? + if (args.fp16): + torch.set_default_tensor_type(torch.cuda.HalfTensor) + if args.modelDir is not None: + try: + from .rife_new_gen.RIFE_HDv3 import Model + except ImportError as e: + raise ValueError(f"{args.modelDir} could not be found. Please contact deforum support {e}") + except Exception as e: + raise ValueError(f"An error occured while trying to import {args.modelDir}: {e}") + else: + print("Got a request to frame-interpolate but no valid frame interpolation engine value provided. Doing... 
nothing") + return + + model = Model() + if not hasattr(model, 'version'): + model.version = 0 + model.load_model(args.modelDir, -1, deforum_models_path) + model.eval() + model.device() + + print(f"{args.modelDir}.pkl model successfully loaded into memory") + print("Interpolation progress (it's OK if it finishes before 100%):") + + interpolated_path = os.path.join(args.raw_output_imgs_path, 'interpolated_frames_rife') + # set custom name depending on if we interpolate after a run, or interpolate a video (related/unrelated to deforum, we don't know) directly from within the RIFE tab + if args.orig_vid_name is not None: # interpolating a video (deforum or unrelated) + custom_interp_path = "{}_{}".format(interpolated_path, args.orig_vid_name) + else: # interpolating after a deforum run: + custom_interp_path = "{}_{}".format(interpolated_path, args.img_batch_id) + + # In this folder we temporarily keep the original frames (converted/ copy-pasted and img format depends on scenario) + # the convertion case is done to avert a problem with 24 and 32 mixed outputs from the same animation run + temp_convert_raw_png_path = os.path.join(args.raw_output_imgs_path, "tmp_rife_folder") + + duplicate_pngs_from_folder(args.raw_output_imgs_path, temp_convert_raw_png_path, args.img_batch_id, args.orig_vid_name) + + videogen = [] + for f in os.listdir(temp_convert_raw_png_path): + # double check for old _depth_ files, not really needed probably but keeping it for now + if '_depth_' not in f: + videogen.append(f) + tot_frame = len(videogen) + videogen.sort(key= lambda x:int(x.split('.')[0])) + img_path = os.path.join(temp_convert_raw_png_path, videogen[0]) + lastframe = cv2.imdecode(np.fromfile(img_path, dtype=np.uint8), cv2.IMREAD_UNCHANGED)[:, :, ::-1].copy() + videogen = videogen[1:] + h, w, _ = lastframe.shape + vid_out = None + + if not os.path.exists(custom_interp_path): + os.mkdir(custom_interp_path) + + tmp = max(128, int(128 / args.scale)) + ph = ((h - 1) // tmp + 1) * tmp + pw = ((w - 1) // tmp + 1) * tmp + padding = (0, pw - w, 0, ph - h) + pbar = tqdm(total=tot_frame) + + write_buffer = Queue(maxsize=500) + read_buffer = Queue(maxsize=500) + + _thread.start_new_thread(build_read_buffer, (args, read_buffer, videogen, temp_convert_raw_png_path)) + _thread.start_new_thread(clear_write_buffer, (args, write_buffer, custom_interp_path)) + + I1 = torch.from_numpy(np.transpose(lastframe, (2, 0, 1))).to(device, non_blocking=True).unsqueeze(0).float() / 255. + I1 = pad_image(I1, args.fp16, padding) + temp = None # save lastframe when processing static frame + + while True: + if temp is not None: + frame = temp + temp = None + else: + frame = read_buffer.get() + if frame is None: + break + I0 = I1 + I1 = torch.from_numpy(np.transpose(frame, (2, 0, 1))).to(device, non_blocking=True).unsqueeze(0).float() / 255. + I1 = pad_image(I1, args.fp16, padding) + I0_small = F.interpolate(I0, (32, 32), mode='bilinear', align_corners=False) + I1_small = F.interpolate(I1, (32, 32), mode='bilinear', align_corners=False) + ssim = ssim_matlab(I0_small[:, :3], I1_small[:, :3]) + + break_flag = False + if ssim > 0.996: + frame = read_buffer.get() # read a new frame + if frame is None: + break_flag = True + frame = lastframe + else: + temp = frame + I1 = torch.from_numpy(np.transpose(frame, (2, 0, 1))).to(device, non_blocking=True).unsqueeze(0).float() / 255. 
+ I1 = pad_image(I1, args.fp16, padding) + I1 = model.inference(I0, I1, args.scale) + I1_small = F.interpolate(I1, (32, 32), mode='bilinear', align_corners=False) + ssim = ssim_matlab(I0_small[:, :3], I1_small[:, :3]) + frame = (I1[0] * 255).byte().cpu().numpy().transpose(1, 2, 0)[:h, :w] + + if ssim < 0.2: + output = [] + for i in range(args.interp_x_amount - 1): + output.append(I0) + else: + output = make_inference(model, I0, I1, args.interp_x_amount - 1, scale) + + write_buffer.put(lastframe) + for mid in output: + mid = (((mid[0] * 255.).byte().cpu().numpy().transpose(1, 2, 0))) + write_buffer.put(mid[:h, :w]) + pbar.update(1) + lastframe = frame + if break_flag: + break + + write_buffer.put(lastframe) + + while (not write_buffer.empty()): + time.sleep(0.1) + pbar.close() + shutil.rmtree(temp_convert_raw_png_path) + + print(f"Interpolation \033[0;32mdone\033[0m in {time.time()-start_time:.2f} seconds!") + # stitch video from interpolated frames, and add audio if needed + try: + print (f"*Passing interpolated frames to ffmpeg...*") + vid_out_path = stitch_video(args.img_batch_id, args.fps, custom_interp_path, args.audio_track, args.ffmpeg_location, args.interp_x_amount, args.slow_mo_enabled, args.slow_mo_x_amount, args.ffmpeg_crf, args.ffmpeg_preset, args.keep_imgs, args.orig_vid_name, srt_path=srt_path) + # remove folder with raw (non-interpolated) vid input frames in case of input VID and not PNGs + if orig_vid_name is not None: + shutil.rmtree(raw_output_imgs_path) + return vid_out_path + except Exception as e: + print(f'Video stitching gone wrong. *Interpolated frames were saved to HD as backup!*. Actual error: {e}') + +def clear_write_buffer(user_args, write_buffer, custom_interp_path): + cnt = 0 + + while True: + item = write_buffer.get() + if item is None: + break + filename = '{}/{:0>9d}.png'.format(custom_interp_path, cnt) + + cv2.imwrite(filename, item[:, :, ::-1]) + + cnt += 1 + +def build_read_buffer(user_args, read_buffer, videogen, temp_convert_raw_png_path): + for frame in videogen: + if not temp_convert_raw_png_path is None: + img_path = os.path.join(temp_convert_raw_png_path, frame) + frame = cv2.imdecode(np.fromfile(img_path, dtype=np.uint8), cv2.IMREAD_UNCHANGED)[:, :, ::-1].copy() + read_buffer.put(frame) + read_buffer.put(None) + +def make_inference(model, I0, I1, n, scale): + if model.version >= 3.9: + res = [] + for i in range(n): + res.append(model.inference(I0, I1, (i + 1) * 1. / (n + 1), scale)) + return res + else: + middle = model.inference(I0, I1, scale) + if n == 1: + return [middle] + first_half = make_inference(model, I0, middle, n=n // 2, scale=scale) + second_half = make_inference(model, middle, I1, n=n // 2, scale=scale) + if n % 2: + return [*first_half, middle, *second_half] + else: + return [*first_half, *second_half] + +def pad_image(img, fp16, padding): + if (fp16): + return F.pad(img, padding).half() + else: + return F.pad(img, padding) + +# TODO: move to fream_interpolation and add FILM to it! 
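+# stitch_video assembles the interpolated PNG frames into an mp4 via ffmpeg_stitch_video, naming the
+# output after the source video (or batch id) plus the RIFE factor, and muxes the original audio track
+# back in when one was provided. Illustrative call with placeholder values only:
+#   stitch_video('20230101120000', 60, '/out/interpolated_frames_rife_20230101120000',
+#                None, 'ffmpeg', 2, False, 2, 17, 'veryslow', False, None)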
+def stitch_video(img_batch_id, fps, img_folder_path, audio_path, ffmpeg_location, interp_x_amount, slow_mo_enabled, slow_mo_x_amount, f_crf, f_preset, keep_imgs, orig_vid_name, srt_path=None): + parent_folder = os.path.dirname(img_folder_path) + grandparent_folder = os.path.dirname(parent_folder) + if orig_vid_name is not None: + mp4_path = os.path.join(grandparent_folder, str(orig_vid_name) +'_RIFE_' + 'x' + str(interp_x_amount)) + else: + mp4_path = os.path.join(parent_folder, str(img_batch_id) +'_RIFE_' + 'x' + str(interp_x_amount)) + + if slow_mo_enabled: + mp4_path = mp4_path + '_slomo_x' + str(slow_mo_x_amount) + mp4_path = mp4_path + '.mp4' + + t = os.path.join(img_folder_path, "%09d.png") + add_soundtrack = 'None' + if not audio_path is None: + add_soundtrack = 'File' + + exception_raised = False + try: + ffmpeg_stitch_video(ffmpeg_location=ffmpeg_location, fps=fps, outmp4_path=mp4_path, stitch_from_frame=0, stitch_to_frame=1000000, imgs_path=t, add_soundtrack=add_soundtrack, audio_path=audio_path, crf=f_crf, preset=f_preset, srt_path=srt_path) + except Exception as e: + exception_raised = True + print(f"An error occurred while stitching the video: {e}") + + if not exception_raised and not keep_imgs: + shutil.rmtree(img_folder_path) + + if (keep_imgs and orig_vid_name is not None) or (orig_vid_name is not None and exception_raised is True): + shutil.move(img_folder_path, grandparent_folder) + + return mp4_path \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/rife/model/loss.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/rife/model/loss.py new file mode 100644 index 0000000000000000000000000000000000000000..72e5de6af050df7d55c2871a69637077970ddfb9 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/rife/model/loss.py @@ -0,0 +1,128 @@ +import torch +import numpy as np +import torch.nn as nn +import torch.nn.functional as F +import torchvision.models as models + +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + + +class EPE(nn.Module): + def __init__(self): + super(EPE, self).__init__() + + def forward(self, flow, gt, loss_mask): + loss_map = (flow - gt.detach()) ** 2 + loss_map = (loss_map.sum(1, True) + 1e-6) ** 0.5 + return (loss_map * loss_mask) + + +class Ternary(nn.Module): + def __init__(self): + super(Ternary, self).__init__() + patch_size = 7 + out_channels = patch_size * patch_size + self.w = np.eye(out_channels).reshape( + (patch_size, patch_size, 1, out_channels)) + self.w = np.transpose(self.w, (3, 2, 0, 1)) + self.w = torch.tensor(self.w).float().to(device) + + def transform(self, img): + patches = F.conv2d(img, self.w, padding=3, bias=None) + transf = patches - img + transf_norm = transf / torch.sqrt(0.81 + transf**2) + return transf_norm + + def rgb2gray(self, rgb): + r, g, b = rgb[:, 0:1, :, :], rgb[:, 1:2, :, :], rgb[:, 2:3, :, :] + gray = 0.2989 * r + 0.5870 * g + 0.1140 * b + return gray + + def hamming(self, t1, t2): + dist = (t1 - t2) ** 2 + dist_norm = torch.mean(dist / (0.1 + dist), 1, True) + return dist_norm + + def valid_mask(self, t, padding): + n, _, h, w = t.size() + inner = torch.ones(n, 1, h - 2 * padding, w - 2 * padding).type_as(t) + mask = F.pad(inner, [padding] * 4) + return mask + + def forward(self, img0, img1): + img0 = self.transform(self.rgb2gray(img0)) + img1 = self.transform(self.rgb2gray(img1)) + return self.hamming(img0, img1) * self.valid_mask(img0, 1) + + +class SOBEL(nn.Module): + def __init__(self): + 
super(SOBEL, self).__init__() + self.kernelX = torch.tensor([ + [1, 0, -1], + [2, 0, -2], + [1, 0, -1], + ]).float() + self.kernelY = self.kernelX.clone().T + self.kernelX = self.kernelX.unsqueeze(0).unsqueeze(0).to(device) + self.kernelY = self.kernelY.unsqueeze(0).unsqueeze(0).to(device) + + def forward(self, pred, gt): + N, C, H, W = pred.shape[0], pred.shape[1], pred.shape[2], pred.shape[3] + img_stack = torch.cat( + [pred.reshape(N*C, 1, H, W), gt.reshape(N*C, 1, H, W)], 0) + sobel_stack_x = F.conv2d(img_stack, self.kernelX, padding=1) + sobel_stack_y = F.conv2d(img_stack, self.kernelY, padding=1) + pred_X, gt_X = sobel_stack_x[:N*C], sobel_stack_x[N*C:] + pred_Y, gt_Y = sobel_stack_y[:N*C], sobel_stack_y[N*C:] + + L1X, L1Y = torch.abs(pred_X-gt_X), torch.abs(pred_Y-gt_Y) + loss = (L1X+L1Y) + return loss + +class MeanShift(nn.Conv2d): + def __init__(self, data_mean, data_std, data_range=1, norm=True): + c = len(data_mean) + super(MeanShift, self).__init__(c, c, kernel_size=1) + std = torch.Tensor(data_std) + self.weight.data = torch.eye(c).view(c, c, 1, 1) + if norm: + self.weight.data.div_(std.view(c, 1, 1, 1)) + self.bias.data = -1 * data_range * torch.Tensor(data_mean) + self.bias.data.div_(std) + else: + self.weight.data.mul_(std.view(c, 1, 1, 1)) + self.bias.data = data_range * torch.Tensor(data_mean) + self.requires_grad = False + +class VGGPerceptualLoss(torch.nn.Module): + def __init__(self, rank=0): + super(VGGPerceptualLoss, self).__init__() + blocks = [] + pretrained = True + self.vgg_pretrained_features = models.vgg19(pretrained=pretrained).features + self.normalize = MeanShift([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], norm=True).cuda() + for param in self.parameters(): + param.requires_grad = False + + def forward(self, X, Y, indices=None): + X = self.normalize(X) + Y = self.normalize(Y) + indices = [2, 7, 12, 21, 30] + weights = [1.0/2.6, 1.0/4.8, 1.0/3.7, 1.0/5.6, 10/1.5] + k = 0 + loss = 0 + for i in range(indices[-1]): + X = self.vgg_pretrained_features[i](X) + Y = self.vgg_pretrained_features[i](Y) + if (i+1) in indices: + loss += weights[k] * (X - Y.detach()).abs().mean() * 0.1 + k += 1 + return loss + +if __name__ == '__main__': + img0 = torch.zeros(3, 3, 256, 256).float().to(device) + img1 = torch.tensor(np.random.normal( + 0, 1, (3, 3, 256, 256))).float().to(device) + ternary_loss = Ternary() + print(ternary_loss(img0, img1).shape) diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/rife/model/pytorch_msssim/__init__.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/rife/model/pytorch_msssim/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a4d30326188cf6afacf2fc84c7ae18efe14dae2e --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/rife/model/pytorch_msssim/__init__.py @@ -0,0 +1,200 @@ +import torch +import torch.nn.functional as F +from math import exp +import numpy as np + +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + +def gaussian(window_size, sigma): + gauss = torch.Tensor([exp(-(x - window_size//2)**2/float(2*sigma**2)) for x in range(window_size)]) + return gauss/gauss.sum() + + +def create_window(window_size, channel=1): + _1D_window = gaussian(window_size, 1.5).unsqueeze(1) + _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0).to(device) + window = _2D_window.expand(channel, 1, window_size, window_size).contiguous() + return window + +def create_window_3d(window_size, channel=1): + _1D_window = 
gaussian(window_size, 1.5).unsqueeze(1) + _2D_window = _1D_window.mm(_1D_window.t()) + _3D_window = _2D_window.unsqueeze(2) @ (_1D_window.t()) + window = _3D_window.expand(1, channel, window_size, window_size, window_size).contiguous().to(device) + return window + + +def ssim(img1, img2, window_size=11, window=None, size_average=True, full=False, val_range=None): + # Value range can be different from 255. Other common ranges are 1 (sigmoid) and 2 (tanh). + if val_range is None: + if torch.max(img1) > 128: + max_val = 255 + else: + max_val = 1 + + if torch.min(img1) < -0.5: + min_val = -1 + else: + min_val = 0 + L = max_val - min_val + else: + L = val_range + + padd = 0 + (_, channel, height, width) = img1.size() + if window is None: + real_size = min(window_size, height, width) + window = create_window(real_size, channel=channel).to(img1.device) + + # mu1 = F.conv2d(img1, window, padding=padd, groups=channel) + # mu2 = F.conv2d(img2, window, padding=padd, groups=channel) + mu1 = F.conv2d(F.pad(img1, (5, 5, 5, 5), mode='replicate'), window, padding=padd, groups=channel) + mu2 = F.conv2d(F.pad(img2, (5, 5, 5, 5), mode='replicate'), window, padding=padd, groups=channel) + + mu1_sq = mu1.pow(2) + mu2_sq = mu2.pow(2) + mu1_mu2 = mu1 * mu2 + + sigma1_sq = F.conv2d(F.pad(img1 * img1, (5, 5, 5, 5), 'replicate'), window, padding=padd, groups=channel) - mu1_sq + sigma2_sq = F.conv2d(F.pad(img2 * img2, (5, 5, 5, 5), 'replicate'), window, padding=padd, groups=channel) - mu2_sq + sigma12 = F.conv2d(F.pad(img1 * img2, (5, 5, 5, 5), 'replicate'), window, padding=padd, groups=channel) - mu1_mu2 + + C1 = (0.01 * L) ** 2 + C2 = (0.03 * L) ** 2 + + v1 = 2.0 * sigma12 + C2 + v2 = sigma1_sq + sigma2_sq + C2 + cs = torch.mean(v1 / v2) # contrast sensitivity + + ssim_map = ((2 * mu1_mu2 + C1) * v1) / ((mu1_sq + mu2_sq + C1) * v2) + + if size_average: + ret = ssim_map.mean() + else: + ret = ssim_map.mean(1).mean(1).mean(1) + + if full: + return ret, cs + return ret + + +def ssim_matlab(img1, img2, window_size=11, window=None, size_average=True, full=False, val_range=None): + # Value range can be different from 255. Other common ranges are 1 (sigmoid) and 2 (tanh). 
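+    # MATLAB-flavored SSIM: the three color channels are stacked as a depth axis and filtered with a
+    # single 3D Gaussian window (conv3d, groups=1) instead of a per-channel 2D window as in ssim() above.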
+ if val_range is None: + if torch.max(img1) > 128: + max_val = 255 + else: + max_val = 1 + + if torch.min(img1) < -0.5: + min_val = -1 + else: + min_val = 0 + L = max_val - min_val + else: + L = val_range + + padd = 0 + (_, _, height, width) = img1.size() + if window is None: + real_size = min(window_size, height, width) + window = create_window_3d(real_size, channel=1).to(img1.device) + # Channel is set to 1 since we consider color images as volumetric images + + img1 = img1.unsqueeze(1) + img2 = img2.unsqueeze(1) + + mu1 = F.conv3d(F.pad(img1, (5, 5, 5, 5, 5, 5), mode='replicate'), window, padding=padd, groups=1) + mu2 = F.conv3d(F.pad(img2, (5, 5, 5, 5, 5, 5), mode='replicate'), window, padding=padd, groups=1) + + mu1_sq = mu1.pow(2) + mu2_sq = mu2.pow(2) + mu1_mu2 = mu1 * mu2 + + sigma1_sq = F.conv3d(F.pad(img1 * img1, (5, 5, 5, 5, 5, 5), 'replicate'), window, padding=padd, groups=1) - mu1_sq + sigma2_sq = F.conv3d(F.pad(img2 * img2, (5, 5, 5, 5, 5, 5), 'replicate'), window, padding=padd, groups=1) - mu2_sq + sigma12 = F.conv3d(F.pad(img1 * img2, (5, 5, 5, 5, 5, 5), 'replicate'), window, padding=padd, groups=1) - mu1_mu2 + + C1 = (0.01 * L) ** 2 + C2 = (0.03 * L) ** 2 + + v1 = 2.0 * sigma12 + C2 + v2 = sigma1_sq + sigma2_sq + C2 + cs = torch.mean(v1 / v2) # contrast sensitivity + + ssim_map = ((2 * mu1_mu2 + C1) * v1) / ((mu1_sq + mu2_sq + C1) * v2) + + if size_average: + ret = ssim_map.mean() + else: + ret = ssim_map.mean(1).mean(1).mean(1) + + if full: + return ret, cs + return ret + + +def msssim(img1, img2, window_size=11, size_average=True, val_range=None, normalize=False): + device = img1.device + weights = torch.FloatTensor([0.0448, 0.2856, 0.3001, 0.2363, 0.1333]).to(device) + levels = weights.size()[0] + mssim = [] + mcs = [] + for _ in range(levels): + sim, cs = ssim(img1, img2, window_size=window_size, size_average=size_average, full=True, val_range=val_range) + mssim.append(sim) + mcs.append(cs) + + img1 = F.avg_pool2d(img1, (2, 2)) + img2 = F.avg_pool2d(img2, (2, 2)) + + mssim = torch.stack(mssim) + mcs = torch.stack(mcs) + + # Normalize (to avoid NaNs during training unstable models, not compliant with original definition) + if normalize: + mssim = (mssim + 1) / 2 + mcs = (mcs + 1) / 2 + + pow1 = mcs ** weights + pow2 = mssim ** weights + # From Matlab implementation https://ece.uwaterloo.ca/~z70wang/research/iwssim/ + output = torch.prod(pow1[:-1] * pow2[-1]) + return output + + +# Classes to re-use window +class SSIM(torch.nn.Module): + def __init__(self, window_size=11, size_average=True, val_range=None): + super(SSIM, self).__init__() + self.window_size = window_size + self.size_average = size_average + self.val_range = val_range + + # Assume 3 channel for SSIM + self.channel = 3 + self.window = create_window(window_size, channel=self.channel) + + def forward(self, img1, img2): + (_, channel, _, _) = img1.size() + + if channel == self.channel and self.window.dtype == img1.dtype: + window = self.window + else: + window = create_window(self.window_size, channel).to(img1.device).type(img1.dtype) + self.window = window + self.channel = channel + + _ssim = ssim(img1, img2, window=window, window_size=self.window_size, size_average=self.size_average) + dssim = (1 - _ssim) / 2 + return dssim + +class MSSSIM(torch.nn.Module): + def __init__(self, window_size=11, size_average=True, channel=3): + super(MSSSIM, self).__init__() + self.window_size = window_size + self.size_average = size_average + self.channel = channel + + def forward(self, img1, img2): + return msssim(img1, 
img2, window_size=self.window_size, size_average=self.size_average) diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/rife/model/warplayer.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/rife/model/warplayer.py new file mode 100644 index 0000000000000000000000000000000000000000..21b0b904cf71b297fd43813134c57d13a3ae9e4a --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/rife/model/warplayer.py @@ -0,0 +1,22 @@ +import torch +import torch.nn as nn + +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") +backwarp_tenGrid = {} + + +def warp(tenInput, tenFlow): + k = (str(tenFlow.device), str(tenFlow.size())) + if k not in backwarp_tenGrid: + tenHorizontal = torch.linspace(-1.0, 1.0, tenFlow.shape[3], device=device).view( + 1, 1, 1, tenFlow.shape[3]).expand(tenFlow.shape[0], -1, tenFlow.shape[2], -1) + tenVertical = torch.linspace(-1.0, 1.0, tenFlow.shape[2], device=device).view( + 1, 1, tenFlow.shape[2], 1).expand(tenFlow.shape[0], -1, -1, tenFlow.shape[3]) + backwarp_tenGrid[k] = torch.cat( + [tenHorizontal, tenVertical], 1).to(device) + + tenFlow = torch.cat([tenFlow[:, 0:1, :, :] / ((tenInput.shape[3] - 1.0) / 2.0), + tenFlow[:, 1:2, :, :] / ((tenInput.shape[2] - 1.0) / 2.0)], 1) + + g = (backwarp_tenGrid[k] + tenFlow).permute(0, 2, 3, 1) + return torch.nn.functional.grid_sample(input=tenInput, grid=g, mode='bilinear', padding_mode='border', align_corners=True) diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/rife/rife_new_gen/IFNet_HDv3.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/rife/rife_new_gen/IFNet_HDv3.py new file mode 100644 index 0000000000000000000000000000000000000000..2360c9e7d15ad4c73e8bb34112999e3d46aeb8c2 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/rife/rife_new_gen/IFNet_HDv3.py @@ -0,0 +1,129 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from ..model.warplayer import warp +# from train_log.refine import * + +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + +def conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1): + return nn.Sequential( + nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, + padding=padding, dilation=dilation, bias=True), + nn.LeakyReLU(0.2, True) + ) + +def conv_bn(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1): + return nn.Sequential( + nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, + padding=padding, dilation=dilation, bias=False), + nn.BatchNorm2d(out_planes), + nn.LeakyReLU(0.2, True) + ) + +class ResConv(nn.Module): + def __init__(self, c, dilation=1): + super(ResConv, self).__init__() + self.conv = nn.Conv2d(c, c, 3, 1, dilation, dilation=dilation, groups=1\ +) + self.beta = nn.Parameter(torch.ones((1, c, 1, 1)), requires_grad=True) + self.relu = nn.LeakyReLU(0.2, True) + + def forward(self, x): + return self.relu(self.conv(x) * self.beta + x) + +class IFBlock(nn.Module): + def __init__(self, in_planes, c=64): + super(IFBlock, self).__init__() + self.conv0 = nn.Sequential( + conv(in_planes, c//2, 3, 2, 1), + conv(c//2, c, 3, 2, 1), + ) + self.convblock = nn.Sequential( + ResConv(c), + ResConv(c), + ResConv(c), + ResConv(c), + ResConv(c), + ResConv(c), + ResConv(c), + ResConv(c), + ) + self.lastconv = nn.Sequential( + nn.ConvTranspose2d(c, 4*6, 4, 2, 1), + nn.PixelShuffle(2) + ) + + def forward(self, x, flow=None, 
scale=1): + x = F.interpolate(x, scale_factor= 1. / scale, mode="bilinear", align_corners=False) + if flow is not None: + flow = F.interpolate(flow, scale_factor= 1. / scale, mode="bilinear", align_corners=False) * 1. / scale + x = torch.cat((x, flow), 1) + feat = self.conv0(x) + feat = self.convblock(feat) + tmp = self.lastconv(feat) + tmp = F.interpolate(tmp, scale_factor=scale, mode="bilinear", align_corners=False) + flow = tmp[:, :4] * scale + mask = tmp[:, 4:5] + return flow, mask + +class IFNet(nn.Module): + def __init__(self): + super(IFNet, self).__init__() + self.block0 = IFBlock(7, c=192) + self.block1 = IFBlock(8+4, c=128) + self.block2 = IFBlock(8+4, c=96) + self.block3 = IFBlock(8+4, c=64) + # self.contextnet = Contextnet() + # self.unet = Unet() + + def forward( self, x, timestep=0.5, scale_list=[8, 4, 2, 1], training=False, fastmode=True, ensemble=False): + if training == False: + channel = x.shape[1] // 2 + img0 = x[:, :channel] + img1 = x[:, channel:] + if not torch.is_tensor(timestep): + timestep = (x[:, :1].clone() * 0 + 1) * timestep + else: + timestep = timestep.repeat(1, 1, img0.shape[2], img0.shape[3]) + flow_list = [] + merged = [] + mask_list = [] + warped_img0 = img0 + warped_img1 = img1 + flow = None + mask = None + loss_cons = 0 + block = [self.block0, self.block1, self.block2, self.block3] + for i in range(4): + if flow is None: + flow, mask = block[i](torch.cat((img0[:, :3], img1[:, :3], timestep), 1), None, scale=scale_list[i]) + if ensemble: + f1, m1 = block[i](torch.cat((img1[:, :3], img0[:, :3], 1-timestep), 1), None, scale=scale_list[i]) + flow = (flow + torch.cat((f1[:, 2:4], f1[:, :2]), 1)) / 2 + mask = (mask + (-m1)) / 2 + else: + f0, m0 = block[i](torch.cat((warped_img0[:, :3], warped_img1[:, :3], timestep, mask), 1), flow, scale=scale_list[i]) + if ensemble: + f1, m1 = block[i](torch.cat((warped_img1[:, :3], warped_img0[:, :3], 1-timestep, -mask), 1), torch.cat((flow[:, 2:4], flow[:, :2]), 1), scale=scale_list[i]) + f0 = (f0 + torch.cat((f1[:, 2:4], f1[:, :2]), 1)) / 2 + m0 = (m0 + (-m1)) / 2 + flow = flow + f0 + mask = mask + m0 + mask_list.append(mask) + flow_list.append(flow) + warped_img0 = warp(img0, flow[:, :2]) + warped_img1 = warp(img1, flow[:, 2:4]) + merged.append((warped_img0, warped_img1)) + mask_list[3] = torch.sigmoid(mask_list[3]) + merged[3] = merged[3][0] * mask_list[3] + merged[3][1] * (1 - mask_list[3]) + if not fastmode: + print('contextnet is removed') + ''' + c0 = self.contextnet(img0, flow[:, :2]) + c1 = self.contextnet(img1, flow[:, 2:4]) + tmp = self.unet(img0, img1, warped_img0, warped_img1, mask, flow, c0, c1) + res = tmp[:, :3] * 2 - 1 + merged[3] = torch.clamp(merged[3] + res, 0, 1) + ''' + return flow_list, mask_list[3], merged diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/rife/rife_new_gen/RIFE_HDv3.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/rife/rife_new_gen/RIFE_HDv3.py new file mode 100644 index 0000000000000000000000000000000000000000..e51408af7e574bc4c8b03739cda0bacd73cd0081 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/rife/rife_new_gen/RIFE_HDv3.py @@ -0,0 +1,108 @@ +import os, sys +import torch +import torch.nn as nn +import numpy as np +from torch.optim import AdamW +import torch.optim as optim +import itertools +from ..model.warplayer import warp +from torch.nn.parallel import DistributedDataParallel as DDP +from .IFNet_HDv3 import * +import torch.nn.functional as F +from ..model.loss import * 
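+# the relative sys.path entry below is meant to make the top-level deforum_helpers package importable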
+sys.path.append('../../') +from deforum_helpers.general_utils import checksum + +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + +class Model: + def __init__(self, local_rank=-1): + self.flownet = IFNet() + self.device() + self.optimG = AdamW(self.flownet.parameters(), lr=1e-6, weight_decay=1e-4) + self.epe = EPE() + self.version = 3.9 + # self.vgg = VGGPerceptualLoss().to(device) + self.sobel = SOBEL() + if local_rank != -1: + self.flownet = DDP(self.flownet, device_ids=[local_rank], output_device=local_rank) + + def train(self): + self.flownet.train() + + def eval(self): + self.flownet.eval() + + def device(self): + self.flownet.to(device) + + def load_model(self, path, rank, deforum_models_path): + + download_rife_model(path, deforum_models_path) + + def convert(param): + if rank == -1: + return { + k.replace("module.", ""): v + for k, v in param.items() + if "module." in k + } + else: + return param + if rank <= 0: + if torch.cuda.is_available(): + self.flownet.load_state_dict(convert(torch.load(os.path.join(deforum_models_path,'{}.pkl').format(path))), False) + else: + self.flownet.load_state_dict(convert(torch.load(os.path.join(deforum_models_path,'{}.pkl').format(path), map_location ='cpu')), False) + + def inference(self, img0, img1, timestep=0.5, scale=1.0): + imgs = torch.cat((img0, img1), 1) + scale_list = [8/scale, 4/scale, 2/scale, 1/scale] + flow, mask, merged = self.flownet(imgs, timestep, scale_list) + return merged[3] + + def update(self, imgs, gt, learning_rate=0, mul=1, training=True, flow_gt=None): + for param_group in self.optimG.param_groups: + param_group['lr'] = learning_rate + img0 = imgs[:, :3] + img1 = imgs[:, 3:] + if training: + self.train() + else: + self.eval() + scale = [8, 4, 2, 1] + flow, mask, merged = self.flownet(torch.cat((imgs, gt), 1), scale=scale, training=training) + loss_l1 = (merged[3] - gt).abs().mean() + loss_smooth = self.sobel(flow[3], flow[3]*0).mean() + # loss_vgg = self.vgg(merged[2], gt) + if training: + self.optimG.zero_grad() + loss_G = loss_l1 + loss_cons + loss_smooth * 0.1 + loss_G.backward() + self.optimG.step() + else: + flow_teacher = flow[2] + return merged[3], { + 'mask': mask, + 'flow': flow[3][:, :2], + 'loss_l1': loss_l1, + 'loss_cons': loss_cons, + 'loss_smooth': loss_smooth, + } + +def download_rife_model(path, deforum_models_path): + options = {'RIFE46': ( + 'af6f0b4bed96dea2c9f0624b449216c7adfaf7f0b722fba0c8f5c6e20b2ec39559cf33f3d238d53b160c22f00c6eaa47dc54a6e4f8aa4f59a6e4a9e90e1a808a', + "https://github.com/hithereai/Practical-RIFE/releases/download/rife46/RIFE46.pkl"), + 'RIFE43': ('ed660f58708ee369a0b3855f64d2d07a6997d949f33067faae51d740123c5ee015901cc57553594f2df8ec08131a1c5f7c883c481eac0f9addd84379acea90c8', + "https://github.com/hithereai/Practical-RIFE/releases/download/rife43/RIFE43.pkl"), + 'RIFE40': ('0baf0bed23597cda402a97a80a7d14c26a9ed797d2fc0790aac93b19ca5b0f50676ba07aa9f8423cf061ed881ece6e67922f001ea402bfced83ef67675142ce7', + "https://github.com/hithereai/Practical-RIFE/releases/download/rife40/RIFE40.pkl")} + if path in options: + target_file = f"{path}.pkl" + target_path = os.path.join(deforum_models_path, target_file) + if not os.path.exists(target_path): + from basicsr.utils.download_util import load_file_from_url + load_file_from_url(options[path][1], deforum_models_path) + if checksum(target_path) != options[path][0]: + raise Exception(f"Error while downloading {target_file}. 
Please download from here: {options[path][1]} and place in: " + deforum_models_path) \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/rife/rife_new_gen/refine.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/rife/rife_new_gen/refine.py new file mode 100644 index 0000000000000000000000000000000000000000..ff3807c636d461862f13200fe0017b62db5c20c5 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/rife/rife_new_gen/refine.py @@ -0,0 +1,90 @@ +import torch +import torch.nn as nn +import numpy as np +from torch.optim import AdamW +import torch.optim as optim +import itertools +from model.warplayer import warp +from torch.nn.parallel import DistributedDataParallel as DDP +import torch.nn.functional as F + +device = torch.device("cuda") + +def conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1): + return nn.Sequential( + nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, + padding=padding, dilation=dilation, bias=True), + nn.PReLU(out_planes) + ) + +def conv_woact(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1): + return nn.Sequential( + nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, + padding=padding, dilation=dilation, bias=True), + ) + +def deconv(in_planes, out_planes, kernel_size=4, stride=2, padding=1): + return nn.Sequential( + torch.nn.ConvTranspose2d(in_channels=in_planes, out_channels=out_planes, kernel_size=4, stride=2, padding=1, bias=True), + nn.PReLU(out_planes) + ) + +class Conv2(nn.Module): + def __init__(self, in_planes, out_planes, stride=2): + super(Conv2, self).__init__() + self.conv1 = conv(in_planes, out_planes, 3, stride, 1) + self.conv2 = conv(out_planes, out_planes, 3, 1, 1) + + def forward(self, x): + x = self.conv1(x) + x = self.conv2(x) + return x + +c = 16 +class Contextnet(nn.Module): + def __init__(self): + super(Contextnet, self).__init__() + self.conv1 = Conv2(3, c) + self.conv2 = Conv2(c, 2*c) + self.conv3 = Conv2(2*c, 4*c) + self.conv4 = Conv2(4*c, 8*c) + + def forward(self, x, flow): + x = self.conv1(x) + flow = F.interpolate(flow, scale_factor=0.5, mode="bilinear", align_corners=False) * 0.5 + f1 = warp(x, flow) + x = self.conv2(x) + flow = F.interpolate(flow, scale_factor=0.5, mode="bilinear", align_corners=False) * 0.5 + f2 = warp(x, flow) + x = self.conv3(x) + flow = F.interpolate(flow, scale_factor=0.5, mode="bilinear", align_corners=False) * 0.5 + f3 = warp(x, flow) + x = self.conv4(x) + flow = F.interpolate(flow, scale_factor=0.5, mode="bilinear", align_corners=False) * 0.5 + f4 = warp(x, flow) + return [f1, f2, f3, f4] + +class Unet(nn.Module): + def __init__(self): + super(Unet, self).__init__() + self.down0 = Conv2(17, 2*c) + self.down1 = Conv2(4*c, 4*c) + self.down2 = Conv2(8*c, 8*c) + self.down3 = Conv2(16*c, 16*c) + self.up0 = deconv(32*c, 8*c) + self.up1 = deconv(16*c, 4*c) + self.up2 = deconv(8*c, 2*c) + self.up3 = deconv(4*c, c) + self.conv = nn.Conv2d(c, 3, 3, 1, 1) + + def forward(self, img0, img1, warped_img0, warped_img1, mask, flow, c0, c1): + s0 = self.down0(torch.cat((img0, img1, warped_img0, warped_img1, mask, flow), 1)) + s1 = self.down1(torch.cat((s0, c0[0], c1[0]), 1)) + s2 = self.down2(torch.cat((s1, c0[1], c1[1]), 1)) + s3 = self.down3(torch.cat((s2, c0[2], c1[2]), 1)) + x = self.up0(torch.cat((s3, c0[3], c1[3]), 1)) + x = self.up1(torch.cat((x, s2), 1)) + x = self.up2(torch.cat((x, s1), 1)) + x = self.up3(torch.cat((x, s0), 1)) + x 
= self.conv(x) + return torch.sigmoid(x) diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/utils.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..fbe08b0b1bd41f2bc59e9f8d188db08423fcf48a --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/utils.py @@ -0,0 +1,140 @@ +import base64 +import math +import re +from io import BytesIO + +import matplotlib.cm +import numpy as np +import torch +import torch.nn +from PIL import Image + + +class RunningAverage: + def __init__(self): + self.avg = 0 + self.count = 0 + + def append(self, value): + self.avg = (value + self.count * self.avg) / (self.count + 1) + self.count += 1 + + def get_value(self): + return self.avg + + +def denormalize(x, device='cpu'): + mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1).to(device) + std = torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1).to(device) + return x * std + mean + + +class RunningAverageDict: + def __init__(self): + self._dict = None + + def update(self, new_dict): + if self._dict is None: + self._dict = dict() + for key, value in new_dict.items(): + self._dict[key] = RunningAverage() + + for key, value in new_dict.items(): + self._dict[key].append(value) + + def get_value(self): + return {key: value.get_value() for key, value in self._dict.items()} + + +def colorize(value, vmin=10, vmax=1000, cmap='magma_r'): + value = value.cpu().numpy()[0, :, :] + invalid_mask = value == -1 + + # normalize + vmin = value.min() if vmin is None else vmin + vmax = value.max() if vmax is None else vmax + if vmin != vmax: + value = (value - vmin) / (vmax - vmin) # vmin..vmax + else: + # Avoid 0-division + value = value * 0. 
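+    # map the normalized values through the requested matplotlib colormap; pixels flagged as
+    # invalid (depth == -1) are painted white afterwards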
+ # squeeze last dim if it exists + # value = value.squeeze(axis=0) + cmapper = matplotlib.cm.get_cmap(cmap) + value = cmapper(value, bytes=True) # (nxmx4) + value[invalid_mask] = 255 + img = value[:, :, :3] + + # return img.transpose((2, 0, 1)) + return img + + +def count_parameters(model): + return sum(p.numel() for p in model.parameters() if p.requires_grad) + + +def compute_errors(gt, pred): + thresh = np.maximum((gt / pred), (pred / gt)) + a1 = (thresh < 1.25).mean() + a2 = (thresh < 1.25 ** 2).mean() + a3 = (thresh < 1.25 ** 3).mean() + + abs_rel = np.mean(np.abs(gt - pred) / gt) + sq_rel = np.mean(((gt - pred) ** 2) / gt) + + rmse = (gt - pred) ** 2 + rmse = np.sqrt(rmse.mean()) + + rmse_log = (np.log(gt) - np.log(pred)) ** 2 + rmse_log = np.sqrt(rmse_log.mean()) + + err = np.log(pred) - np.log(gt) + silog = np.sqrt(np.mean(err ** 2) - np.mean(err) ** 2) * 100 + + log_10 = (np.abs(np.log10(gt) - np.log10(pred))).mean() + return dict(a1=a1, a2=a2, a3=a3, abs_rel=abs_rel, rmse=rmse, log_10=log_10, rmse_log=rmse_log, + silog=silog, sq_rel=sq_rel) + + +##################################### Demo Utilities ############################################ +def b64_to_pil(b64string): + image_data = re.sub('^data:image/.+;base64,', '', b64string) + # image = Image.open(cStringIO.StringIO(image_data)) + return Image.open(BytesIO(base64.b64decode(image_data))) + + +# Compute edge magnitudes +from scipy import ndimage + + +def edges(d): + dx = ndimage.sobel(d, 0) # horizontal derivative + dy = ndimage.sobel(d, 1) # vertical derivative + return np.abs(dx) + np.abs(dy) + + +class PointCloudHelper(): + def __init__(self, width=640, height=480): + self.xx, self.yy = self.worldCoords(width, height) + + def worldCoords(self, width=640, height=480): + hfov_degrees, vfov_degrees = 57, 43 + hFov = math.radians(hfov_degrees) + vFov = math.radians(vfov_degrees) + cx, cy = width / 2, height / 2 + fx = width / (2 * math.tan(hFov / 2)) + fy = height / (2 * math.tan(vFov / 2)) + xx, yy = np.tile(range(width), height), np.repeat(range(height), width) + xx = (xx - cx) / fx + yy = (yy - cy) / fy + return xx, yy + + def depth_to_points(self, depth): + depth[edges(depth) > 0.3] = np.nan # Hide depth edges + length = depth.shape[0] * depth.shape[1] + # depth[edges(depth) > 0.3] = 1e6 # Hide depth edges + z = depth.reshape(length) + + return np.dstack((self.xx * z, self.yy * z, z)).reshape((length, 3)) + +##################################################################################################### diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/data/__init__.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/data/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5f2668792389157609abb2a0846fb620e7d67eb9 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/data/__init__.py @@ -0,0 +1,24 @@ +# MIT License + +# Copyright (c) 2022 Intelligent Systems Lab Org + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in 
all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +# File author: Shariq Farooq Bhat + diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/data/data_mono.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/data/data_mono.py new file mode 100644 index 0000000000000000000000000000000000000000..80a8486f239a35331df553f490e213f9bf71e735 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/data/data_mono.py @@ -0,0 +1,573 @@ +# MIT License + +# Copyright (c) 2022 Intelligent Systems Lab Org + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+ +# File author: Shariq Farooq Bhat + +# This file is partly inspired from BTS (https://github.com/cleinc/bts/blob/master/pytorch/bts_dataloader.py); author: Jin Han Lee + +import itertools +import os +import random + +import numpy as np +import cv2 +import torch +import torch.nn as nn +import torch.utils.data.distributed +from zoedepth.utils.easydict import EasyDict as edict +from PIL import Image, ImageOps +from torch.utils.data import DataLoader, Dataset +from torchvision import transforms + +from zoedepth.utils.config import change_dataset + +from .ddad import get_ddad_loader +from .diml_indoor_test import get_diml_indoor_loader +from .diml_outdoor_test import get_diml_outdoor_loader +from .diode import get_diode_loader +from .hypersim import get_hypersim_loader +from .ibims import get_ibims_loader +from .sun_rgbd_loader import get_sunrgbd_loader +from .vkitti import get_vkitti_loader +from .vkitti2 import get_vkitti2_loader + +from .preprocess import CropParams, get_white_border, get_black_border + + +def _is_pil_image(img): + return isinstance(img, Image.Image) + + +def _is_numpy_image(img): + return isinstance(img, np.ndarray) and (img.ndim in {2, 3}) + + +def preprocessing_transforms(mode, **kwargs): + return transforms.Compose([ + ToTensor(mode=mode, **kwargs) + ]) + + +class DepthDataLoader(object): + def __init__(self, config, mode, device='cpu', transform=None, **kwargs): + """ + Data loader for depth datasets + + Args: + config (dict): Config dictionary. Refer to utils/config.py + mode (str): "train" or "online_eval" + device (str, optional): Device to load the data on. Defaults to 'cpu'. + transform (torchvision.transforms, optional): Transform to apply to the data. Defaults to None. + """ + + self.config = config + + if config.dataset == 'ibims': + self.data = get_ibims_loader(config, batch_size=1, num_workers=1) + return + + if config.dataset == 'sunrgbd': + self.data = get_sunrgbd_loader( + data_dir_root=config.sunrgbd_root, batch_size=1, num_workers=1) + return + + if config.dataset == 'diml_indoor': + self.data = get_diml_indoor_loader( + data_dir_root=config.diml_indoor_root, batch_size=1, num_workers=1) + return + + if config.dataset == 'diml_outdoor': + self.data = get_diml_outdoor_loader( + data_dir_root=config.diml_outdoor_root, batch_size=1, num_workers=1) + return + + if "diode" in config.dataset: + self.data = get_diode_loader( + config[config.dataset+"_root"], batch_size=1, num_workers=1) + return + + if config.dataset == 'hypersim_test': + self.data = get_hypersim_loader( + config.hypersim_test_root, batch_size=1, num_workers=1) + return + + if config.dataset == 'vkitti': + self.data = get_vkitti_loader( + config.vkitti_root, batch_size=1, num_workers=1) + return + + if config.dataset == 'vkitti2': + self.data = get_vkitti2_loader( + config.vkitti2_root, batch_size=1, num_workers=1) + return + + if config.dataset == 'ddad': + self.data = get_ddad_loader(config.ddad_root, resize_shape=( + 352, 1216), batch_size=1, num_workers=1) + return + + img_size = self.config.get("img_size", None) + img_size = img_size if self.config.get( + "do_input_resize", False) else None + + if transform is None: + transform = preprocessing_transforms(mode, size=img_size) + + if mode == 'train': + + Dataset = DataLoadPreprocess + self.training_samples = Dataset( + config, mode, transform=transform, device=device) + + if config.distributed: + self.train_sampler = torch.utils.data.distributed.DistributedSampler( + self.training_samples) + else: + self.train_sampler = None + + self.data 
= DataLoader(self.training_samples, + batch_size=config.batch_size, + shuffle=(self.train_sampler is None), + num_workers=config.workers, + pin_memory=True, + persistent_workers=True, + # prefetch_factor=2, + sampler=self.train_sampler) + + elif mode == 'online_eval': + self.testing_samples = DataLoadPreprocess( + config, mode, transform=transform) + if config.distributed: # redundant. here only for readability and to be more explicit + # Give whole test set to all processes (and report evaluation only on one) regardless + self.eval_sampler = None + else: + self.eval_sampler = None + self.data = DataLoader(self.testing_samples, 1, + shuffle=kwargs.get("shuffle_test", False), + num_workers=1, + pin_memory=False, + sampler=self.eval_sampler) + + elif mode == 'test': + self.testing_samples = DataLoadPreprocess( + config, mode, transform=transform) + self.data = DataLoader(self.testing_samples, + 1, shuffle=False, num_workers=1) + + else: + print( + 'mode should be one of \'train, test, online_eval\'. Got {}'.format(mode)) + + +def repetitive_roundrobin(*iterables): + """ + cycles through iterables but sample wise + first yield first sample from first iterable then first sample from second iterable and so on + then second sample from first iterable then second sample from second iterable and so on + + If one iterable is shorter than the others, it is repeated until all iterables are exhausted + repetitive_roundrobin('ABC', 'D', 'EF') --> A D E B D F C D E + """ + # Repetitive roundrobin + iterables_ = [iter(it) for it in iterables] + exhausted = [False] * len(iterables) + while not all(exhausted): + for i, it in enumerate(iterables_): + try: + yield next(it) + except StopIteration: + exhausted[i] = True + iterables_[i] = itertools.cycle(iterables[i]) + # First elements may get repeated if one iterable is shorter than the others + yield next(iterables_[i]) + + +class RepetitiveRoundRobinDataLoader(object): + def __init__(self, *dataloaders): + self.dataloaders = dataloaders + + def __iter__(self): + return repetitive_roundrobin(*self.dataloaders) + + def __len__(self): + # First samples get repeated, thats why the plus one + return len(self.dataloaders) * (max(len(dl) for dl in self.dataloaders) + 1) + + +class MixedNYUKITTI(object): + def __init__(self, config, mode, device='cpu', **kwargs): + config = edict(config) + config.workers = config.workers // 2 + self.config = config + nyu_conf = change_dataset(edict(config), 'nyu') + kitti_conf = change_dataset(edict(config), 'kitti') + + # make nyu default for testing + self.config = config = nyu_conf + img_size = self.config.get("img_size", None) + img_size = img_size if self.config.get( + "do_input_resize", False) else None + if mode == 'train': + nyu_loader = DepthDataLoader( + nyu_conf, mode, device=device, transform=preprocessing_transforms(mode, size=img_size)).data + kitti_loader = DepthDataLoader( + kitti_conf, mode, device=device, transform=preprocessing_transforms(mode, size=img_size)).data + # It has been changed to repetitive roundrobin + self.data = RepetitiveRoundRobinDataLoader( + nyu_loader, kitti_loader) + else: + self.data = DepthDataLoader(nyu_conf, mode, device=device).data + + +def remove_leading_slash(s): + if s[0] == '/' or s[0] == '\\': + return s[1:] + return s + + +class CachedReader: + def __init__(self, shared_dict=None): + if shared_dict: + self._cache = shared_dict + else: + self._cache = {} + + def open(self, fpath): + im = self._cache.get(fpath, None) + if im is None: + im = self._cache[fpath] = Image.open(fpath) + 
return im + + +class ImReader: + def __init__(self): + pass + + # @cache + def open(self, fpath): + return Image.open(fpath) + + +class DataLoadPreprocess(Dataset): + def __init__(self, config, mode, transform=None, is_for_online_eval=False, **kwargs): + self.config = config + if mode == 'online_eval': + with open(config.filenames_file_eval, 'r') as f: + self.filenames = f.readlines() + else: + with open(config.filenames_file, 'r') as f: + self.filenames = f.readlines() + + self.mode = mode + self.transform = transform + self.to_tensor = ToTensor(mode) + self.is_for_online_eval = is_for_online_eval + if config.use_shared_dict: + self.reader = CachedReader(config.shared_dict) + else: + self.reader = ImReader() + + def postprocess(self, sample): + return sample + + def __getitem__(self, idx): + sample_path = self.filenames[idx] + focal = float(sample_path.split()[2]) + sample = {} + + if self.mode == 'train': + if self.config.dataset == 'kitti' and self.config.use_right and random.random() > 0.5: + image_path = os.path.join( + self.config.data_path, remove_leading_slash(sample_path.split()[3])) + depth_path = os.path.join( + self.config.gt_path, remove_leading_slash(sample_path.split()[4])) + else: + image_path = os.path.join( + self.config.data_path, remove_leading_slash(sample_path.split()[0])) + depth_path = os.path.join( + self.config.gt_path, remove_leading_slash(sample_path.split()[1])) + + image = self.reader.open(image_path) + depth_gt = self.reader.open(depth_path) + w, h = image.size + + if self.config.do_kb_crop: + height = image.height + width = image.width + top_margin = int(height - 352) + left_margin = int((width - 1216) / 2) + depth_gt = depth_gt.crop( + (left_margin, top_margin, left_margin + 1216, top_margin + 352)) + image = image.crop( + (left_margin, top_margin, left_margin + 1216, top_margin + 352)) + + # Avoid blank boundaries due to pixel registration? + # Train images have white border. Test images have black border. 
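+            # For NYU with avoid_boundary enabled, the blank registration border is cropped away, then
+            # the image is reflect-padded (and the depth zero-padded) back to the original size.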
+ if self.config.dataset == 'nyu' and self.config.avoid_boundary: + # print("Avoiding Blank Boundaries!") + # We just crop and pad again with reflect padding to original size + # original_size = image.size + crop_params = get_white_border(np.array(image, dtype=np.uint8)) + image = image.crop((crop_params.left, crop_params.top, crop_params.right, crop_params.bottom)) + depth_gt = depth_gt.crop((crop_params.left, crop_params.top, crop_params.right, crop_params.bottom)) + + # Use reflect padding to fill the blank + image = np.array(image) + image = np.pad(image, ((crop_params.top, h - crop_params.bottom), (crop_params.left, w - crop_params.right), (0, 0)), mode='reflect') + image = Image.fromarray(image) + + depth_gt = np.array(depth_gt) + depth_gt = np.pad(depth_gt, ((crop_params.top, h - crop_params.bottom), (crop_params.left, w - crop_params.right)), 'constant', constant_values=0) + depth_gt = Image.fromarray(depth_gt) + + + if self.config.do_random_rotate and (self.config.aug): + random_angle = (random.random() - 0.5) * 2 * self.config.degree + image = self.rotate_image(image, random_angle) + depth_gt = self.rotate_image( + depth_gt, random_angle, flag=Image.NEAREST) + + image = np.asarray(image, dtype=np.float32) / 255.0 + depth_gt = np.asarray(depth_gt, dtype=np.float32) + depth_gt = np.expand_dims(depth_gt, axis=2) + + if self.config.dataset == 'nyu': + depth_gt = depth_gt / 1000.0 + else: + depth_gt = depth_gt / 256.0 + + if self.config.aug and (self.config.random_crop): + image, depth_gt = self.random_crop( + image, depth_gt, self.config.input_height, self.config.input_width) + + if self.config.aug and self.config.random_translate: + # print("Random Translation!") + image, depth_gt = self.random_translate(image, depth_gt, self.config.max_translation) + + image, depth_gt = self.train_preprocess(image, depth_gt) + mask = np.logical_and(depth_gt > self.config.min_depth, + depth_gt < self.config.max_depth).squeeze()[None, ...] + sample = {'image': image, 'depth': depth_gt, 'focal': focal, + 'mask': mask, **sample} + + else: + if self.mode == 'online_eval': + data_path = self.config.data_path_eval + else: + data_path = self.config.data_path + + image_path = os.path.join( + data_path, remove_leading_slash(sample_path.split()[0])) + image = np.asarray(self.reader.open(image_path), + dtype=np.float32) / 255.0 + + if self.mode == 'online_eval': + gt_path = self.config.gt_path_eval + depth_path = os.path.join( + gt_path, remove_leading_slash(sample_path.split()[1])) + has_valid_depth = False + try: + depth_gt = self.reader.open(depth_path) + has_valid_depth = True + except IOError: + depth_gt = False + # print('Missing gt for {}'.format(image_path)) + + if has_valid_depth: + depth_gt = np.asarray(depth_gt, dtype=np.float32) + depth_gt = np.expand_dims(depth_gt, axis=2) + if self.config.dataset == 'nyu': + depth_gt = depth_gt / 1000.0 + else: + depth_gt = depth_gt / 256.0 + + mask = np.logical_and( + depth_gt >= self.config.min_depth, depth_gt <= self.config.max_depth).squeeze()[None, ...] 
+ else: + mask = False + + if self.config.do_kb_crop: + height = image.shape[0] + width = image.shape[1] + top_margin = int(height - 352) + left_margin = int((width - 1216) / 2) + image = image[top_margin:top_margin + 352, + left_margin:left_margin + 1216, :] + if self.mode == 'online_eval' and has_valid_depth: + depth_gt = depth_gt[top_margin:top_margin + + 352, left_margin:left_margin + 1216, :] + + if self.mode == 'online_eval': + sample = {'image': image, 'depth': depth_gt, 'focal': focal, 'has_valid_depth': has_valid_depth, + 'image_path': sample_path.split()[0], 'depth_path': sample_path.split()[1], + 'mask': mask} + else: + sample = {'image': image, 'focal': focal} + + if (self.mode == 'train') or ('has_valid_depth' in sample and sample['has_valid_depth']): + mask = np.logical_and(depth_gt > self.config.min_depth, + depth_gt < self.config.max_depth).squeeze()[None, ...] + sample['mask'] = mask + + if self.transform: + sample = self.transform(sample) + + sample = self.postprocess(sample) + sample['dataset'] = self.config.dataset + sample = {**sample, 'image_path': sample_path.split()[0], 'depth_path': sample_path.split()[1]} + + return sample + + def rotate_image(self, image, angle, flag=Image.BILINEAR): + result = image.rotate(angle, resample=flag) + return result + + def random_crop(self, img, depth, height, width): + assert img.shape[0] >= height + assert img.shape[1] >= width + assert img.shape[0] == depth.shape[0] + assert img.shape[1] == depth.shape[1] + x = random.randint(0, img.shape[1] - width) + y = random.randint(0, img.shape[0] - height) + img = img[y:y + height, x:x + width, :] + depth = depth[y:y + height, x:x + width, :] + + return img, depth + + def random_translate(self, img, depth, max_t=20): + assert img.shape[0] == depth.shape[0] + assert img.shape[1] == depth.shape[1] + p = self.config.translate_prob + do_translate = random.random() + if do_translate > p: + return img, depth + x = random.randint(-max_t, max_t) + y = random.randint(-max_t, max_t) + M = np.float32([[1, 0, x], [0, 1, y]]) + # print(img.shape, depth.shape) + img = cv2.warpAffine(img, M, (img.shape[1], img.shape[0])) + depth = cv2.warpAffine(depth, M, (depth.shape[1], depth.shape[0])) + depth = depth.squeeze()[..., None] # add channel dim back. 
Affine warp removes it + # print("after", img.shape, depth.shape) + return img, depth + + def train_preprocess(self, image, depth_gt): + if self.config.aug: + # Random flipping + do_flip = random.random() + if do_flip > 0.5: + image = (image[:, ::-1, :]).copy() + depth_gt = (depth_gt[:, ::-1, :]).copy() + + # Random gamma, brightness, color augmentation + do_augment = random.random() + if do_augment > 0.5: + image = self.augment_image(image) + + return image, depth_gt + + def augment_image(self, image): + # gamma augmentation + gamma = random.uniform(0.9, 1.1) + image_aug = image ** gamma + + # brightness augmentation + if self.config.dataset == 'nyu': + brightness = random.uniform(0.75, 1.25) + else: + brightness = random.uniform(0.9, 1.1) + image_aug = image_aug * brightness + + # color augmentation + colors = np.random.uniform(0.9, 1.1, size=3) + white = np.ones((image.shape[0], image.shape[1])) + color_image = np.stack([white * colors[i] for i in range(3)], axis=2) + image_aug *= color_image + image_aug = np.clip(image_aug, 0, 1) + + return image_aug + + def __len__(self): + return len(self.filenames) + + +class ToTensor(object): + def __init__(self, mode, do_normalize=False, size=None): + self.mode = mode + self.normalize = transforms.Normalize( + mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) if do_normalize else nn.Identity() + self.size = size + if size is not None: + self.resize = transforms.Resize(size=size) + else: + self.resize = nn.Identity() + + def __call__(self, sample): + image, focal = sample['image'], sample['focal'] + image = self.to_tensor(image) + image = self.normalize(image) + image = self.resize(image) + + if self.mode == 'test': + return {'image': image, 'focal': focal} + + depth = sample['depth'] + if self.mode == 'train': + depth = self.to_tensor(depth) + return {**sample, 'image': image, 'depth': depth, 'focal': focal} + else: + has_valid_depth = sample['has_valid_depth'] + image = self.resize(image) + return {**sample, 'image': image, 'depth': depth, 'focal': focal, 'has_valid_depth': has_valid_depth, + 'image_path': sample['image_path'], 'depth_path': sample['depth_path']} + + def to_tensor(self, pic): + if not (_is_pil_image(pic) or _is_numpy_image(pic)): + raise TypeError( + 'pic should be PIL Image or ndarray. 
Got {}'.format(type(pic))) + + if isinstance(pic, np.ndarray): + img = torch.from_numpy(pic.transpose((2, 0, 1))) + return img + + # handle PIL Image + if pic.mode == 'I': + img = torch.from_numpy(np.array(pic, np.int32, copy=False)) + elif pic.mode == 'I;16': + img = torch.from_numpy(np.array(pic, np.int16, copy=False)) + else: + img = torch.ByteTensor( + torch.ByteStorage.from_buffer(pic.tobytes())) + # PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK + if pic.mode == 'YCbCr': + nchannel = 3 + elif pic.mode == 'I;16': + nchannel = 1 + else: + nchannel = len(pic.mode) + img = img.view(pic.size[1], pic.size[0], nchannel) + + img = img.transpose(0, 1).transpose(0, 2).contiguous() + if isinstance(img, torch.ByteTensor): + return img.float() + else: + return img diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/data/ddad.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/data/ddad.py new file mode 100644 index 0000000000000000000000000000000000000000..4bd0492bdec767685d3a21992b4a26e62d002d97 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/data/ddad.py @@ -0,0 +1,117 @@ +# MIT License + +# Copyright (c) 2022 Intelligent Systems Lab Org + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
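For reference, the ToTensor transform defined above (used by the train/eval dataloader) only applies the ImageNet normalization when do_normalize is set and only resizes the image, never the depth map. A minimal sketch of the train-mode path, assuming the class is in scope; the sample values, the placeholder focal length and the (384, 512) size are purely illustrative:

    import numpy as np

    to_tensor = ToTensor(mode='train', do_normalize=True, size=(384, 512))
    sample = {
        'image': np.random.rand(480, 640, 3).astype(np.float32),
        'depth': np.random.rand(480, 640, 1).astype(np.float32),
        'focal': 518.86,  # placeholder focal length
    }
    out = to_tensor(sample)
    print(out['image'].shape)  # torch.Size([3, 384, 512]) -- normalized and resized
    print(out['depth'].shape)  # torch.Size([1, 480, 640]) -- depth left at full resolution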
+ +# File author: Shariq Farooq Bhat + +import os + +import numpy as np +import torch +from PIL import Image +from torch.utils.data import DataLoader, Dataset +from torchvision import transforms + + +class ToTensor(object): + def __init__(self, resize_shape): + # self.normalize = transforms.Normalize( + # mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) + self.normalize = lambda x : x + self.resize = transforms.Resize(resize_shape) + + def __call__(self, sample): + image, depth = sample['image'], sample['depth'] + image = self.to_tensor(image) + image = self.normalize(image) + depth = self.to_tensor(depth) + + image = self.resize(image) + + return {'image': image, 'depth': depth, 'dataset': "ddad"} + + def to_tensor(self, pic): + + if isinstance(pic, np.ndarray): + img = torch.from_numpy(pic.transpose((2, 0, 1))) + return img + + # # handle PIL Image + if pic.mode == 'I': + img = torch.from_numpy(np.array(pic, np.int32, copy=False)) + elif pic.mode == 'I;16': + img = torch.from_numpy(np.array(pic, np.int16, copy=False)) + else: + img = torch.ByteTensor( + torch.ByteStorage.from_buffer(pic.tobytes())) + # PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK + if pic.mode == 'YCbCr': + nchannel = 3 + elif pic.mode == 'I;16': + nchannel = 1 + else: + nchannel = len(pic.mode) + img = img.view(pic.size[1], pic.size[0], nchannel) + + img = img.transpose(0, 1).transpose(0, 2).contiguous() + + if isinstance(img, torch.ByteTensor): + return img.float() + else: + return img + + +class DDAD(Dataset): + def __init__(self, data_dir_root, resize_shape): + import glob + + # image paths are of the form /{outleft, depthmap}/*.png + self.image_files = glob.glob(os.path.join(data_dir_root, '*.png')) + self.depth_files = [r.replace("_rgb.png", "_depth.npy") + for r in self.image_files] + self.transform = ToTensor(resize_shape) + + def __getitem__(self, idx): + + image_path = self.image_files[idx] + depth_path = self.depth_files[idx] + + image = np.asarray(Image.open(image_path), dtype=np.float32) / 255.0 + depth = np.load(depth_path) # meters + + # depth[depth > 8] = -1 + depth = depth[..., None] + + sample = dict(image=image, depth=depth) + sample = self.transform(sample) + + if idx == 0: + print(sample["image"].shape) + + return sample + + def __len__(self): + return len(self.image_files) + + +def get_ddad_loader(data_dir_root, resize_shape, batch_size=1, **kwargs): + dataset = DDAD(data_dir_root, resize_shape) + return DataLoader(dataset, batch_size, **kwargs) diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/data/diml_indoor_test.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/data/diml_indoor_test.py new file mode 100644 index 0000000000000000000000000000000000000000..f720ad9aefaee78ef4ec363dfef0f82ace850a6d --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/data/diml_indoor_test.py @@ -0,0 +1,125 @@ +# MIT License + +# Copyright (c) 2022 Intelligent Systems Lab Org + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in 
all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +# File author: Shariq Farooq Bhat + +import os + +import numpy as np +import torch +from PIL import Image +from torch.utils.data import DataLoader, Dataset +from torchvision import transforms + + +class ToTensor(object): + def __init__(self): + # self.normalize = transforms.Normalize( + # mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) + self.normalize = lambda x : x + self.resize = transforms.Resize((480, 640)) + + def __call__(self, sample): + image, depth = sample['image'], sample['depth'] + image = self.to_tensor(image) + image = self.normalize(image) + depth = self.to_tensor(depth) + + image = self.resize(image) + + return {'image': image, 'depth': depth, 'dataset': "diml_indoor"} + + def to_tensor(self, pic): + + if isinstance(pic, np.ndarray): + img = torch.from_numpy(pic.transpose((2, 0, 1))) + return img + + # # handle PIL Image + if pic.mode == 'I': + img = torch.from_numpy(np.array(pic, np.int32, copy=False)) + elif pic.mode == 'I;16': + img = torch.from_numpy(np.array(pic, np.int16, copy=False)) + else: + img = torch.ByteTensor( + torch.ByteStorage.from_buffer(pic.tobytes())) + # PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK + if pic.mode == 'YCbCr': + nchannel = 3 + elif pic.mode == 'I;16': + nchannel = 1 + else: + nchannel = len(pic.mode) + img = img.view(pic.size[1], pic.size[0], nchannel) + + img = img.transpose(0, 1).transpose(0, 2).contiguous() + if isinstance(img, torch.ByteTensor): + return img.float() + else: + return img + + +class DIML_Indoor(Dataset): + def __init__(self, data_dir_root): + import glob + + # image paths are of the form /{HR, LR}//{color, depth_filled}/*.png + self.image_files = glob.glob(os.path.join( + data_dir_root, "LR", '*', 'color', '*.png')) + self.depth_files = [r.replace("color", "depth_filled").replace( + "_c.png", "_depth_filled.png") for r in self.image_files] + self.transform = ToTensor() + + def __getitem__(self, idx): + image_path = self.image_files[idx] + depth_path = self.depth_files[idx] + + image = np.asarray(Image.open(image_path), dtype=np.float32) / 255.0 + depth = np.asarray(Image.open(depth_path), + dtype='uint16') / 1000.0 # mm to meters + + # print(np.shape(image)) + # print(np.shape(depth)) + + # depth[depth > 8] = -1 + depth = depth[..., None] + + sample = dict(image=image, depth=depth) + + # return sample + sample = self.transform(sample) + + if idx == 0: + print(sample["image"].shape) + + return sample + + def __len__(self): + return len(self.image_files) + + +def get_diml_indoor_loader(data_dir_root, batch_size=1, **kwargs): + dataset = DIML_Indoor(data_dir_root) + return DataLoader(dataset, batch_size, **kwargs) + +# get_diml_indoor_loader(data_dir_root="datasets/diml/indoor/test/HR") +# get_diml_indoor_loader(data_dir_root="datasets/diml/indoor/test/LR") diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/data/diml_outdoor_test.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/data/diml_outdoor_test.py new file 
mode 100644 index 0000000000000000000000000000000000000000..8670b48f5febafb819dac22848ad79ccb5dd5ae4 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/data/diml_outdoor_test.py @@ -0,0 +1,114 @@ +# MIT License + +# Copyright (c) 2022 Intelligent Systems Lab Org + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +# File author: Shariq Farooq Bhat + +import os + +import numpy as np +import torch +from PIL import Image +from torch.utils.data import DataLoader, Dataset +from torchvision import transforms + + +class ToTensor(object): + def __init__(self): + # self.normalize = transforms.Normalize( + # mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) + self.normalize = lambda x : x + + def __call__(self, sample): + image, depth = sample['image'], sample['depth'] + image = self.to_tensor(image) + image = self.normalize(image) + depth = self.to_tensor(depth) + + return {'image': image, 'depth': depth, 'dataset': "diml_outdoor"} + + def to_tensor(self, pic): + + if isinstance(pic, np.ndarray): + img = torch.from_numpy(pic.transpose((2, 0, 1))) + return img + + # # handle PIL Image + if pic.mode == 'I': + img = torch.from_numpy(np.array(pic, np.int32, copy=False)) + elif pic.mode == 'I;16': + img = torch.from_numpy(np.array(pic, np.int16, copy=False)) + else: + img = torch.ByteTensor( + torch.ByteStorage.from_buffer(pic.tobytes())) + # PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK + if pic.mode == 'YCbCr': + nchannel = 3 + elif pic.mode == 'I;16': + nchannel = 1 + else: + nchannel = len(pic.mode) + img = img.view(pic.size[1], pic.size[0], nchannel) + + img = img.transpose(0, 1).transpose(0, 2).contiguous() + if isinstance(img, torch.ByteTensor): + return img.float() + else: + return img + + +class DIML_Outdoor(Dataset): + def __init__(self, data_dir_root): + import glob + + # image paths are of the form /{outleft, depthmap}/*.png + self.image_files = glob.glob(os.path.join( + data_dir_root, "*", 'outleft', '*.png')) + self.depth_files = [r.replace("outleft", "depthmap") + for r in self.image_files] + self.transform = ToTensor() + + def __getitem__(self, idx): + image_path = self.image_files[idx] + depth_path = self.depth_files[idx] + + image = np.asarray(Image.open(image_path), dtype=np.float32) / 255.0 + depth = np.asarray(Image.open(depth_path), + dtype='uint16') / 1000.0 # mm to meters + + # depth[depth > 8] = -1 + depth = depth[..., None] + + sample = dict(image=image, depth=depth, dataset="diml_outdoor") + + # return sample + return 
self.transform(sample) + + def __len__(self): + return len(self.image_files) + + +def get_diml_outdoor_loader(data_dir_root, batch_size=1, **kwargs): + dataset = DIML_Outdoor(data_dir_root) + return DataLoader(dataset, batch_size, **kwargs) + +# get_diml_outdoor_loader(data_dir_root="datasets/diml/outdoor/test/HR") +# get_diml_outdoor_loader(data_dir_root="datasets/diml/outdoor/test/LR") diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/data/diode.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/data/diode.py new file mode 100644 index 0000000000000000000000000000000000000000..1510c87116b8f70ce2e1428873a8e4da042bee23 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/data/diode.py @@ -0,0 +1,125 @@ +# MIT License + +# Copyright (c) 2022 Intelligent Systems Lab Org + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
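Like the other evaluation-only datasets in this patch, the DIML outdoor loader above globs the RGB files, derives the depth paths by string substitution, and returns unnormalized tensors. A minimal usage sketch, assuming the vendored src directory is on sys.path so that zoedepth.data.* is importable; the dataset root mirrors the commented examples at the end of the file and is a placeholder:

    from zoedepth.data.diml_outdoor_test import get_diml_outdoor_loader

    loader = get_diml_outdoor_loader("datasets/diml/outdoor/test/HR", batch_size=1)
    for i, batch in enumerate(loader):
        # image: [1, 3, H, W] float, depth: [1, 1, H, W] in meters
        print(batch["image"].shape, batch["depth"].shape, batch["dataset"])
        if i == 2:
            break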
+ +# File author: Shariq Farooq Bhat + +import os + +import numpy as np +import torch +from PIL import Image +from torch.utils.data import DataLoader, Dataset +from torchvision import transforms + + +class ToTensor(object): + def __init__(self): + # self.normalize = transforms.Normalize( + # mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) + self.normalize = lambda x : x + self.resize = transforms.Resize(480) + + def __call__(self, sample): + image, depth = sample['image'], sample['depth'] + image = self.to_tensor(image) + image = self.normalize(image) + depth = self.to_tensor(depth) + + image = self.resize(image) + + return {'image': image, 'depth': depth, 'dataset': "diode"} + + def to_tensor(self, pic): + + if isinstance(pic, np.ndarray): + img = torch.from_numpy(pic.transpose((2, 0, 1))) + return img + + # # handle PIL Image + if pic.mode == 'I': + img = torch.from_numpy(np.array(pic, np.int32, copy=False)) + elif pic.mode == 'I;16': + img = torch.from_numpy(np.array(pic, np.int16, copy=False)) + else: + img = torch.ByteTensor( + torch.ByteStorage.from_buffer(pic.tobytes())) + # PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK + if pic.mode == 'YCbCr': + nchannel = 3 + elif pic.mode == 'I;16': + nchannel = 1 + else: + nchannel = len(pic.mode) + img = img.view(pic.size[1], pic.size[0], nchannel) + + img = img.transpose(0, 1).transpose(0, 2).contiguous() + + if isinstance(img, torch.ByteTensor): + return img.float() + else: + return img + + +class DIODE(Dataset): + def __init__(self, data_dir_root): + import glob + + # image paths are of the form /scene_#/scan_#/*.png + self.image_files = glob.glob( + os.path.join(data_dir_root, '*', '*', '*.png')) + self.depth_files = [r.replace(".png", "_depth.npy") + for r in self.image_files] + self.depth_mask_files = [ + r.replace(".png", "_depth_mask.npy") for r in self.image_files] + self.transform = ToTensor() + + def __getitem__(self, idx): + image_path = self.image_files[idx] + depth_path = self.depth_files[idx] + depth_mask_path = self.depth_mask_files[idx] + + image = np.asarray(Image.open(image_path), dtype=np.float32) / 255.0 + depth = np.load(depth_path) # in meters + valid = np.load(depth_mask_path) # binary + + # depth[depth > 8] = -1 + # depth = depth[..., None] + + sample = dict(image=image, depth=depth, valid=valid) + + # return sample + sample = self.transform(sample) + + if idx == 0: + print(sample["image"].shape) + + return sample + + def __len__(self): + return len(self.image_files) + + +def get_diode_loader(data_dir_root, batch_size=1, **kwargs): + dataset = DIODE(data_dir_root) + return DataLoader(dataset, batch_size, **kwargs) + +# get_diode_loader(data_dir_root="datasets/diode/val/outdoor") diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/data/hypersim.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/data/hypersim.py new file mode 100644 index 0000000000000000000000000000000000000000..4334198971830200f72ea2910d03f4c7d6a43334 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/data/hypersim.py @@ -0,0 +1,138 @@ +# MIT License + +# Copyright (c) 2022 Intelligent Systems Lab Org + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the 
Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +# File author: Shariq Farooq Bhat + +import glob +import os + +import h5py +import numpy as np +import torch +from PIL import Image +from torch.utils.data import DataLoader, Dataset +from torchvision import transforms + + +def hypersim_distance_to_depth(npyDistance): + intWidth, intHeight, fltFocal = 1024, 768, 886.81 + + npyImageplaneX = np.linspace((-0.5 * intWidth) + 0.5, (0.5 * intWidth) - 0.5, intWidth).reshape( + 1, intWidth).repeat(intHeight, 0).astype(np.float32)[:, :, None] + npyImageplaneY = np.linspace((-0.5 * intHeight) + 0.5, (0.5 * intHeight) - 0.5, + intHeight).reshape(intHeight, 1).repeat(intWidth, 1).astype(np.float32)[:, :, None] + npyImageplaneZ = np.full([intHeight, intWidth, 1], fltFocal, np.float32) + npyImageplane = np.concatenate( + [npyImageplaneX, npyImageplaneY, npyImageplaneZ], 2) + + npyDepth = npyDistance / np.linalg.norm(npyImageplane, 2, 2) * fltFocal + return npyDepth + + +class ToTensor(object): + def __init__(self): + # self.normalize = transforms.Normalize( + # mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) + self.normalize = lambda x: x + self.resize = transforms.Resize((480, 640)) + + def __call__(self, sample): + image, depth = sample['image'], sample['depth'] + image = self.to_tensor(image) + image = self.normalize(image) + depth = self.to_tensor(depth) + + image = self.resize(image) + + return {'image': image, 'depth': depth, 'dataset': "hypersim"} + + def to_tensor(self, pic): + + if isinstance(pic, np.ndarray): + img = torch.from_numpy(pic.transpose((2, 0, 1))) + return img + + # # handle PIL Image + if pic.mode == 'I': + img = torch.from_numpy(np.array(pic, np.int32, copy=False)) + elif pic.mode == 'I;16': + img = torch.from_numpy(np.array(pic, np.int16, copy=False)) + else: + img = torch.ByteTensor( + torch.ByteStorage.from_buffer(pic.tobytes())) + # PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK + if pic.mode == 'YCbCr': + nchannel = 3 + elif pic.mode == 'I;16': + nchannel = 1 + else: + nchannel = len(pic.mode) + img = img.view(pic.size[1], pic.size[0], nchannel) + + img = img.transpose(0, 1).transpose(0, 2).contiguous() + if isinstance(img, torch.ByteTensor): + return img.float() + else: + return img + + +class HyperSim(Dataset): + def __init__(self, data_dir_root): + # image paths are of the form //images/scene_cam_#_final_preview/*.tonemap.jpg + # depth paths are of the form //images/scene_cam_#_final_preview/*.depth_meters.hdf5 + self.image_files = glob.glob(os.path.join( + data_dir_root, '*', 'images', 'scene_cam_*_final_preview', '*.tonemap.jpg')) + self.depth_files = [r.replace("_final_preview", "_geometry_hdf5").replace( + ".tonemap.jpg", ".depth_meters.hdf5") for r in self.image_files] + self.transform = ToTensor() + + def __getitem__(self, idx): + image_path = self.image_files[idx] + depth_path = 
self.depth_files[idx] + + image = np.asarray(Image.open(image_path), dtype=np.float32) / 255.0 + + # depth from hdf5 + depth_fd = h5py.File(depth_path, "r") + # in meters (Euclidean distance) + distance_meters = np.array(depth_fd['dataset']) + depth = hypersim_distance_to_depth( + distance_meters) # in meters (planar depth) + + # depth[depth > 8] = -1 + depth = depth[..., None] + + sample = dict(image=image, depth=depth) + sample = self.transform(sample) + + if idx == 0: + print(sample["image"].shape) + + return sample + + def __len__(self): + return len(self.image_files) + + +def get_hypersim_loader(data_dir_root, batch_size=1, **kwargs): + dataset = HyperSim(data_dir_root) + return DataLoader(dataset, batch_size, **kwargs) diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/data/ibims.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/data/ibims.py new file mode 100644 index 0000000000000000000000000000000000000000..b66abfabcf4cfc617d4a60ec818780c3548d9920 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/data/ibims.py @@ -0,0 +1,81 @@ +# MIT License + +# Copyright (c) 2022 Intelligent Systems Lab Org + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
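hypersim_distance_to_depth above converts Hypersim's per-pixel ray distances into planar depth: each distance is projected onto the optical axis by dividing by the norm of the corresponding image-plane ray and multiplying by the focal length (with the 1024x768 / 886.81 intrinsics hard-coded). A small numeric check, assuming zoedepth.data.hypersim is importable; the 5 m constant is arbitrary:

    import numpy as np
    from zoedepth.data.hypersim import hypersim_distance_to_depth

    distance = np.full((768, 1024), 5.0, dtype=np.float32)  # constant ray distance
    depth = hypersim_distance_to_depth(distance)

    print(depth[384, 512])  # ~5.0: at the image centre the ray is (almost) the optical axis
    print(depth[0, 0])      # ~4.06: at the corner, depth = distance * cos(ray angle) < distance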
+ +# File author: Shariq Farooq Bhat + +import os + +import numpy as np +import torch +from PIL import Image +from torch.utils.data import DataLoader, Dataset +from torchvision import transforms as T + + +class iBims(Dataset): + def __init__(self, config): + root_folder = config.ibims_root + with open(os.path.join(root_folder, "imagelist.txt"), 'r') as f: + imglist = f.read().split() + + samples = [] + for basename in imglist: + img_path = os.path.join(root_folder, 'rgb', basename + ".png") + depth_path = os.path.join(root_folder, 'depth', basename + ".png") + valid_mask_path = os.path.join( + root_folder, 'mask_invalid', basename+".png") + transp_mask_path = os.path.join( + root_folder, 'mask_transp', basename+".png") + + samples.append( + (img_path, depth_path, valid_mask_path, transp_mask_path)) + + self.samples = samples + # self.normalize = T.Normalize( + # mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) + self.normalize = lambda x : x + + def __getitem__(self, idx): + img_path, depth_path, valid_mask_path, transp_mask_path = self.samples[idx] + + img = np.asarray(Image.open(img_path), dtype=np.float32) / 255.0 + depth = np.asarray(Image.open(depth_path), + dtype=np.uint16).astype('float')*50.0/65535 + + mask_valid = np.asarray(Image.open(valid_mask_path)) + mask_transp = np.asarray(Image.open(transp_mask_path)) + + # depth = depth * mask_valid * mask_transp + depth = np.where(mask_valid * mask_transp, depth, -1) + + img = torch.from_numpy(img).permute(2, 0, 1) + img = self.normalize(img) + depth = torch.from_numpy(depth).unsqueeze(0) + return dict(image=img, depth=depth, image_path=img_path, depth_path=depth_path, dataset='ibims') + + def __len__(self): + return len(self.samples) + + +def get_ibims_loader(config, batch_size=1, **kwargs): + dataloader = DataLoader(iBims(config), batch_size=batch_size, **kwargs) + return dataloader diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/data/preprocess.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/data/preprocess.py new file mode 100644 index 0000000000000000000000000000000000000000..e08cc309dc823ae6efd7cda8db9eb37130dc5499 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/data/preprocess.py @@ -0,0 +1,154 @@ +# MIT License + +# Copyright (c) 2022 Intelligent Systems Lab Org + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
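The iBims-1 ground truth above is stored as 16-bit PNGs covering 0-50 m, which is where the * 50.0 / 65535 rescale comes from, and pixels rejected by the invalid or transparency masks are set to -1 rather than dropped. A minimal sketch of building the loader; only ibims_root is read from the config, so a bare namespace is enough (the path is a placeholder):

    from types import SimpleNamespace
    from zoedepth.data.ibims import get_ibims_loader

    config = SimpleNamespace(ibims_root="datasets/ibims/ibims1_core_raw")
    loader = get_ibims_loader(config, batch_size=1)

    batch = next(iter(loader))
    print(batch["image"].shape, batch["depth"].shape)   # [1, 3, H, W], [1, 1, H, W]
    print((batch["depth"] < 0).float().mean())          # fraction of invalid/transparent pixels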
+ +# File author: Shariq Farooq Bhat + +import numpy as np +from dataclasses import dataclass +from typing import Tuple, List + +# dataclass to store the crop parameters +@dataclass +class CropParams: + top: int + bottom: int + left: int + right: int + + + +def get_border_params(rgb_image, tolerance=0.1, cut_off=20, value=0, level_diff_threshold=5, channel_axis=-1, min_border=5) -> CropParams: + gray_image = np.mean(rgb_image, axis=channel_axis) + h, w = gray_image.shape + + + def num_value_pixels(arr): + return np.sum(np.abs(arr - value) < level_diff_threshold) + + def is_above_tolerance(arr, total_pixels): + return (num_value_pixels(arr) / total_pixels) > tolerance + + # Crop top border until number of value pixels become below tolerance + top = min_border + while is_above_tolerance(gray_image[top, :], w) and top < h-1: + top += 1 + if top > cut_off: + break + + # Crop bottom border until number of value pixels become below tolerance + bottom = h - min_border + while is_above_tolerance(gray_image[bottom, :], w) and bottom > 0: + bottom -= 1 + if h - bottom > cut_off: + break + + # Crop left border until number of value pixels become below tolerance + left = min_border + while is_above_tolerance(gray_image[:, left], h) and left < w-1: + left += 1 + if left > cut_off: + break + + # Crop right border until number of value pixels become below tolerance + right = w - min_border + while is_above_tolerance(gray_image[:, right], h) and right > 0: + right -= 1 + if w - right > cut_off: + break + + + return CropParams(top, bottom, left, right) + + +def get_white_border(rgb_image, value=255, **kwargs) -> CropParams: + """Crops the white border of the RGB. + + Args: + rgb: RGB image, shape (H, W, 3). + Returns: + Crop parameters. + """ + if value == 255: + # assert range of values in rgb image is [0, 255] + assert np.max(rgb_image) <= 255 and np.min(rgb_image) >= 0, "RGB image values are not in range [0, 255]." + assert rgb_image.max() > 1, "RGB image values are not in range [0, 255]." + elif value == 1: + # assert range of values in rgb image is [0, 1] + assert np.max(rgb_image) <= 1 and np.min(rgb_image) >= 0, "RGB image values are not in range [0, 1]." + + return get_border_params(rgb_image, value=value, **kwargs) + +def get_black_border(rgb_image, **kwargs) -> CropParams: + """Crops the black border of the RGB. + + Args: + rgb: RGB image, shape (H, W, 3). + + Returns: + Crop parameters. + """ + + return get_border_params(rgb_image, value=0, **kwargs) + +def crop_image(image: np.ndarray, crop_params: CropParams) -> np.ndarray: + """Crops the image according to the crop parameters. + + Args: + image: RGB or depth image, shape (H, W, 3) or (H, W). + crop_params: Crop parameters. + + Returns: + Cropped image. + """ + return image[crop_params.top:crop_params.bottom, crop_params.left:crop_params.right] + +def crop_images(*images: np.ndarray, crop_params: CropParams) -> Tuple[np.ndarray]: + """Crops the images according to the crop parameters. + + Args: + images: RGB or depth images, shape (H, W, 3) or (H, W). + crop_params: Crop parameters. + + Returns: + Cropped images. + """ + return tuple(crop_image(image, crop_params) for image in images) + +def crop_black_or_white_border(rgb_image, *other_images: np.ndarray, tolerance=0.1, cut_off=20, level_diff_threshold=5) -> Tuple[np.ndarray]: + """Crops the white and black border of the RGB and depth images. + + Args: + rgb: RGB image, shape (H, W, 3). This image is used to determine the border. 
+ other_images: The other images to crop according to the border of the RGB image. + Returns: + Cropped RGB and other images. + """ + # crop black border + crop_params = get_black_border(rgb_image, tolerance=tolerance, cut_off=cut_off, level_diff_threshold=level_diff_threshold) + cropped_images = crop_images(rgb_image, *other_images, crop_params=crop_params) + + # crop white border + crop_params = get_white_border(cropped_images[0], tolerance=tolerance, cut_off=cut_off, level_diff_threshold=level_diff_threshold) + cropped_images = crop_images(*cropped_images, crop_params=crop_params) + + return cropped_images + \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/data/sun_rgbd_loader.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/data/sun_rgbd_loader.py new file mode 100644 index 0000000000000000000000000000000000000000..9e2bdb9aefe68ca4439f41eff3bba722c49fb976 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/data/sun_rgbd_loader.py @@ -0,0 +1,106 @@ +# MIT License + +# Copyright (c) 2022 Intelligent Systems Lab Org + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
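crop_black_or_white_border above trims a black frame first and then a white frame, and it always measures the border on the RGB image while applying the same CropParams to every additional image, so RGB and depth stay pixel-aligned. A small synthetic example, assuming zoedepth.data.preprocess is importable; the sizes and grey fill are arbitrary:

    import numpy as np
    from zoedepth.data.preprocess import crop_black_or_white_border

    rgb = np.full((480, 640, 3), 255, dtype=np.uint8)       # white frame...
    rgb[15:465, 15:625] = 128                                # ...around a grey interior
    depth = np.random.rand(480, 640).astype(np.float32)     # same spatial size

    rgb_cropped, depth_cropped = crop_black_or_white_border(rgb, depth)
    # Both outputs share the same height/width because the crop found on the
    # RGB image is reused for the depth map.
    print(rgb_cropped.shape, depth_cropped.shape)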
+ +# File author: Shariq Farooq Bhat + +import os + +import numpy as np +import torch +from PIL import Image +from torch.utils.data import DataLoader, Dataset +from torchvision import transforms + + +class ToTensor(object): + def __init__(self): + # self.normalize = transforms.Normalize( + # mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) + self.normalize = lambda x : x + + def __call__(self, sample): + image, depth = sample['image'], sample['depth'] + image = self.to_tensor(image) + image = self.normalize(image) + depth = self.to_tensor(depth) + + return {'image': image, 'depth': depth, 'dataset': "sunrgbd"} + + def to_tensor(self, pic): + + if isinstance(pic, np.ndarray): + img = torch.from_numpy(pic.transpose((2, 0, 1))) + return img + + # # handle PIL Image + if pic.mode == 'I': + img = torch.from_numpy(np.array(pic, np.int32, copy=False)) + elif pic.mode == 'I;16': + img = torch.from_numpy(np.array(pic, np.int16, copy=False)) + else: + img = torch.ByteTensor( + torch.ByteStorage.from_buffer(pic.tobytes())) + # PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK + if pic.mode == 'YCbCr': + nchannel = 3 + elif pic.mode == 'I;16': + nchannel = 1 + else: + nchannel = len(pic.mode) + img = img.view(pic.size[1], pic.size[0], nchannel) + + img = img.transpose(0, 1).transpose(0, 2).contiguous() + if isinstance(img, torch.ByteTensor): + return img.float() + else: + return img + + +class SunRGBD(Dataset): + def __init__(self, data_dir_root): + # test_file_dirs = loadmat(train_test_file)['alltest'].squeeze() + # all_test = [t[0].replace("/n/fs/sun3d/data/", "") for t in test_file_dirs] + # self.all_test = [os.path.join(data_dir_root, t) for t in all_test] + import glob + self.image_files = glob.glob( + os.path.join(data_dir_root, 'rgb', 'rgb', '*')) + self.depth_files = [ + r.replace("rgb/rgb", "gt/gt").replace("jpg", "png") for r in self.image_files] + self.transform = ToTensor() + + def __getitem__(self, idx): + image_path = self.image_files[idx] + depth_path = self.depth_files[idx] + + image = np.asarray(Image.open(image_path), dtype=np.float32) / 255.0 + depth = np.asarray(Image.open(depth_path), dtype='uint16') / 1000.0 + depth[depth > 8] = -1 + depth = depth[..., None] + return self.transform(dict(image=image, depth=depth)) + + def __len__(self): + return len(self.image_files) + + +def get_sunrgbd_loader(data_dir_root, batch_size=1, **kwargs): + dataset = SunRGBD(data_dir_root) + return DataLoader(dataset, batch_size, **kwargs) diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/data/transforms.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/data/transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..374416dff24fb4fd55598f3946d6d6b091ddefc9 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/data/transforms.py @@ -0,0 +1,481 @@ +# MIT License + +# Copyright (c) 2022 Intelligent Systems Lab Org + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial 
portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +# File author: Shariq Farooq Bhat + +import math +import random + +import cv2 +import numpy as np + + +class RandomFliplr(object): + """Horizontal flip of the sample with given probability. + """ + + def __init__(self, probability=0.5): + """Init. + + Args: + probability (float, optional): Flip probability. Defaults to 0.5. + """ + self.__probability = probability + + def __call__(self, sample): + prob = random.random() + + if prob < self.__probability: + for k, v in sample.items(): + if len(v.shape) >= 2: + sample[k] = np.fliplr(v).copy() + + return sample + + +def apply_min_size(sample, size, image_interpolation_method=cv2.INTER_AREA): + """Rezise the sample to ensure the given size. Keeps aspect ratio. + + Args: + sample (dict): sample + size (tuple): image size + + Returns: + tuple: new size + """ + shape = list(sample["disparity"].shape) + + if shape[0] >= size[0] and shape[1] >= size[1]: + return sample + + scale = [0, 0] + scale[0] = size[0] / shape[0] + scale[1] = size[1] / shape[1] + + scale = max(scale) + + shape[0] = math.ceil(scale * shape[0]) + shape[1] = math.ceil(scale * shape[1]) + + # resize + sample["image"] = cv2.resize( + sample["image"], tuple(shape[::-1]), interpolation=image_interpolation_method + ) + + sample["disparity"] = cv2.resize( + sample["disparity"], tuple(shape[::-1]), interpolation=cv2.INTER_NEAREST + ) + sample["mask"] = cv2.resize( + sample["mask"].astype(np.float32), + tuple(shape[::-1]), + interpolation=cv2.INTER_NEAREST, + ) + sample["mask"] = sample["mask"].astype(bool) + + return tuple(shape) + + +class RandomCrop(object): + """Get a random crop of the sample with the given size (width, height). + """ + + def __init__( + self, + width, + height, + resize_if_needed=False, + image_interpolation_method=cv2.INTER_AREA, + ): + """Init. + + Args: + width (int): output width + height (int): output height + resize_if_needed (bool, optional): If True, sample might be upsampled to ensure + that a crop of size (width, height) is possbile. Defaults to False. + """ + self.__size = (height, width) + self.__resize_if_needed = resize_if_needed + self.__image_interpolation_method = image_interpolation_method + + def __call__(self, sample): + + shape = sample["disparity"].shape + + if self.__size[0] > shape[0] or self.__size[1] > shape[1]: + if self.__resize_if_needed: + shape = apply_min_size( + sample, self.__size, self.__image_interpolation_method + ) + else: + raise Exception( + "Output size {} bigger than input size {}.".format( + self.__size, shape + ) + ) + + offset = ( + np.random.randint(shape[0] - self.__size[0] + 1), + np.random.randint(shape[1] - self.__size[1] + 1), + ) + + for k, v in sample.items(): + if k == "code" or k == "basis": + continue + + if len(sample[k].shape) >= 2: + sample[k] = v[ + offset[0]: offset[0] + self.__size[0], + offset[1]: offset[1] + self.__size[1], + ] + + return sample + + +class Resize(object): + """Resize sample to given size (width, height). 
+ """ + + def __init__( + self, + width, + height, + resize_target=True, + keep_aspect_ratio=False, + ensure_multiple_of=1, + resize_method="lower_bound", + image_interpolation_method=cv2.INTER_AREA, + letter_box=False, + ): + """Init. + + Args: + width (int): desired output width + height (int): desired output height + resize_target (bool, optional): + True: Resize the full sample (image, mask, target). + False: Resize image only. + Defaults to True. + keep_aspect_ratio (bool, optional): + True: Keep the aspect ratio of the input sample. + Output sample might not have the given width and height, and + resize behaviour depends on the parameter 'resize_method'. + Defaults to False. + ensure_multiple_of (int, optional): + Output width and height is constrained to be multiple of this parameter. + Defaults to 1. + resize_method (str, optional): + "lower_bound": Output will be at least as large as the given size. + "upper_bound": Output will be at max as large as the given size. (Output size might be smaller than given size.) + "minimal": Scale as least as possible. (Output size might be smaller than given size.) + Defaults to "lower_bound". + """ + self.__width = width + self.__height = height + + self.__resize_target = resize_target + self.__keep_aspect_ratio = keep_aspect_ratio + self.__multiple_of = ensure_multiple_of + self.__resize_method = resize_method + self.__image_interpolation_method = image_interpolation_method + self.__letter_box = letter_box + + def constrain_to_multiple_of(self, x, min_val=0, max_val=None): + y = (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int) + + if max_val is not None and y > max_val: + y = (np.floor(x / self.__multiple_of) + * self.__multiple_of).astype(int) + + if y < min_val: + y = (np.ceil(x / self.__multiple_of) + * self.__multiple_of).astype(int) + + return y + + def get_size(self, width, height): + # determine new height and width + scale_height = self.__height / height + scale_width = self.__width / width + + if self.__keep_aspect_ratio: + if self.__resize_method == "lower_bound": + # scale such that output size is lower bound + if scale_width > scale_height: + # fit width + scale_height = scale_width + else: + # fit height + scale_width = scale_height + elif self.__resize_method == "upper_bound": + # scale such that output size is upper bound + if scale_width < scale_height: + # fit width + scale_height = scale_width + else: + # fit height + scale_width = scale_height + elif self.__resize_method == "minimal": + # scale as least as possbile + if abs(1 - scale_width) < abs(1 - scale_height): + # fit width + scale_height = scale_width + else: + # fit height + scale_width = scale_height + else: + raise ValueError( + f"resize_method {self.__resize_method} not implemented" + ) + + if self.__resize_method == "lower_bound": + new_height = self.constrain_to_multiple_of( + scale_height * height, min_val=self.__height + ) + new_width = self.constrain_to_multiple_of( + scale_width * width, min_val=self.__width + ) + elif self.__resize_method == "upper_bound": + new_height = self.constrain_to_multiple_of( + scale_height * height, max_val=self.__height + ) + new_width = self.constrain_to_multiple_of( + scale_width * width, max_val=self.__width + ) + elif self.__resize_method == "minimal": + new_height = self.constrain_to_multiple_of(scale_height * height) + new_width = self.constrain_to_multiple_of(scale_width * width) + else: + raise ValueError( + f"resize_method {self.__resize_method} not implemented") + + return (new_width, new_height) + + 
def make_letter_box(self, sample):
+        top = bottom = (self.__height - sample.shape[0]) // 2
+        left = right = (self.__width - sample.shape[1]) // 2
+        sample = cv2.copyMakeBorder(
+            sample, top, bottom, left, right, cv2.BORDER_CONSTANT, None, 0)
+        return sample
+
+    def __call__(self, sample):
+        width, height = self.get_size(
+            sample["image"].shape[1], sample["image"].shape[0]
+        )
+
+        # resize sample
+        sample["image"] = cv2.resize(
+            sample["image"],
+            (width, height),
+            interpolation=self.__image_interpolation_method,
+        )
+
+        if self.__letter_box:
+            sample["image"] = self.make_letter_box(sample["image"])
+
+        if self.__resize_target:
+            if "disparity" in sample:
+                sample["disparity"] = cv2.resize(
+                    sample["disparity"],
+                    (width, height),
+                    interpolation=cv2.INTER_NEAREST,
+                )
+
+                if self.__letter_box:
+                    sample["disparity"] = self.make_letter_box(
+                        sample["disparity"])
+
+            if "depth" in sample:
+                sample["depth"] = cv2.resize(
+                    sample["depth"], (width,
+                                      height), interpolation=cv2.INTER_NEAREST
+                )
+
+                if self.__letter_box:
+                    sample["depth"] = self.make_letter_box(sample["depth"])
+
+            sample["mask"] = cv2.resize(
+                sample["mask"].astype(np.float32),
+                (width, height),
+                interpolation=cv2.INTER_NEAREST,
+            )
+
+            if self.__letter_box:
+                sample["mask"] = self.make_letter_box(sample["mask"])
+
+            sample["mask"] = sample["mask"].astype(bool)
+
+        return sample
+
+
+class ResizeFixed(object):
+    def __init__(self, size):
+        self.__size = size
+
+    def __call__(self, sample):
+        sample["image"] = cv2.resize(
+            sample["image"], self.__size[::-1], interpolation=cv2.INTER_LINEAR
+        )
+
+        sample["disparity"] = cv2.resize(
+            sample["disparity"], self.__size[::-1], interpolation=cv2.INTER_NEAREST
+        )
+
+        sample["mask"] = cv2.resize(
+            sample["mask"].astype(np.float32),
+            self.__size[::-1],
+            interpolation=cv2.INTER_NEAREST,
+        )
+        sample["mask"] = sample["mask"].astype(bool)
+
+        return sample
+
+
+class Rescale(object):
+    """Rescale target values to the interval [0, max_val].
+    If input is constant, values are set to max_val / 2.
+    """
+
+    def __init__(self, max_val=1.0, use_mask=True):
+        """Init.
+
+        Args:
+            max_val (float, optional): Max output value. Defaults to 1.0.
+            use_mask (bool, optional): Only operate on valid pixels (mask == True). Defaults to True.
+        """
+        self.__max_val = max_val
+        self.__use_mask = use_mask
+
+    def __call__(self, sample):
+        disp = sample["disparity"]
+
+        if self.__use_mask:
+            mask = sample["mask"]
+        else:
+            # np.bool was removed in newer NumPy releases; the builtin bool is equivalent here
+            mask = np.ones_like(disp, dtype=bool)
+
+        if np.sum(mask) == 0:
+            return sample
+
+        min_val = np.min(disp[mask])
+        max_val = np.max(disp[mask])
+
+        if max_val > min_val:
+            sample["disparity"][mask] = (
+                (disp[mask] - min_val) / (max_val - min_val) * self.__max_val
+            )
+        else:
+            sample["disparity"][mask] = np.ones_like(
+                disp[mask]) * self.__max_val / 2.0
+
+        return sample
+
+
+# mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
+class NormalizeImage(object):
+    """Normalize image by given mean and std.
+    """
+
+    def __init__(self, mean, std):
+        self.__mean = mean
+        self.__std = std
+
+    def __call__(self, sample):
+        sample["image"] = (sample["image"] - self.__mean) / self.__std
+
+        return sample
+
+
+class DepthToDisparity(object):
+    """Convert depth to disparity. Removes depth from sample.
+ """ + + def __init__(self, eps=1e-4): + self.__eps = eps + + def __call__(self, sample): + assert "depth" in sample + + sample["mask"][sample["depth"] < self.__eps] = False + + sample["disparity"] = np.zeros_like(sample["depth"]) + sample["disparity"][sample["depth"] >= self.__eps] = ( + 1.0 / sample["depth"][sample["depth"] >= self.__eps] + ) + + del sample["depth"] + + return sample + + +class DisparityToDepth(object): + """Convert disparity to depth. Removes disparity from sample. + """ + + def __init__(self, eps=1e-4): + self.__eps = eps + + def __call__(self, sample): + assert "disparity" in sample + + disp = np.abs(sample["disparity"]) + sample["mask"][disp < self.__eps] = False + + # print(sample["disparity"]) + # print(sample["mask"].sum()) + # exit() + + sample["depth"] = np.zeros_like(disp) + sample["depth"][disp >= self.__eps] = ( + 1.0 / disp[disp >= self.__eps] + ) + + del sample["disparity"] + + return sample + + +class PrepareForNet(object): + """Prepare sample for usage as network input. + """ + + def __init__(self): + pass + + def __call__(self, sample): + image = np.transpose(sample["image"], (2, 0, 1)) + sample["image"] = np.ascontiguousarray(image).astype(np.float32) + + if "mask" in sample: + sample["mask"] = sample["mask"].astype(np.float32) + sample["mask"] = np.ascontiguousarray(sample["mask"]) + + if "disparity" in sample: + disparity = sample["disparity"].astype(np.float32) + sample["disparity"] = np.ascontiguousarray(disparity) + + if "depth" in sample: + depth = sample["depth"].astype(np.float32) + sample["depth"] = np.ascontiguousarray(depth) + + return sample diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/data/vkitti.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/data/vkitti.py new file mode 100644 index 0000000000000000000000000000000000000000..72a2e5a8346f6e630ede0e28d6959725af8d7e72 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/data/vkitti.py @@ -0,0 +1,151 @@ +# MIT License + +# Copyright (c) 2022 Intelligent Systems Lab Org + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+
+# File author: Shariq Farooq Bhat
+
+import torch
+from torch.utils.data import Dataset, DataLoader
+from torchvision import transforms
+import os
+
+from PIL import Image
+import numpy as np
+import cv2
+
+
+class ToTensor(object):
+    def __init__(self):
+        self.normalize = transforms.Normalize(
+            mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+        # self.resize = transforms.Resize((375, 1242))
+
+    def __call__(self, sample):
+        image, depth = sample['image'], sample['depth']
+
+        image = self.to_tensor(image)
+        image = self.normalize(image)
+        depth = self.to_tensor(depth)
+
+        # image = self.resize(image)
+
+        return {'image': image, 'depth': depth, 'dataset': "vkitti"}
+
+    def to_tensor(self, pic):
+
+        if isinstance(pic, np.ndarray):
+            img = torch.from_numpy(pic.transpose((2, 0, 1)))
+            return img
+
+        # # handle PIL Image
+        if pic.mode == 'I':
+            img = torch.from_numpy(np.array(pic, np.int32, copy=False))
+        elif pic.mode == 'I;16':
+            img = torch.from_numpy(np.array(pic, np.int16, copy=False))
+        else:
+            img = torch.ByteTensor(
+                torch.ByteStorage.from_buffer(pic.tobytes()))
+        # PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK
+        if pic.mode == 'YCbCr':
+            nchannel = 3
+        elif pic.mode == 'I;16':
+            nchannel = 1
+        else:
+            nchannel = len(pic.mode)
+        img = img.view(pic.size[1], pic.size[0], nchannel)
+
+        img = img.transpose(0, 1).transpose(0, 2).contiguous()
+        if isinstance(img, torch.ByteTensor):
+            return img.float()
+        else:
+            return img
+
+
+class VKITTI(Dataset):
+    def __init__(self, data_dir_root, do_kb_crop=True):
+        import glob
+        # image paths are of the form <data_dir_root>/test_color/*.png
+        self.image_files = glob.glob(os.path.join(
+            data_dir_root, "test_color", '*.png'))
+        self.depth_files = [r.replace("test_color", "test_depth")
+                            for r in self.image_files]
+        self.do_kb_crop = True
+        self.transform = ToTensor()
+
+    def __getitem__(self, idx):
+        image_path = self.image_files[idx]
+        depth_path = self.depth_files[idx]
+
+        image = Image.open(image_path)
+        depth = Image.open(depth_path)
+        depth = cv2.imread(depth_path, cv2.IMREAD_ANYCOLOR |
+                           cv2.IMREAD_ANYDEPTH)
+        print("depth min max", depth.min(), depth.max())
+
+        # print(np.shape(image))
+        # print(np.shape(depth))
+
+        # depth[depth > 8] = -1
+
+        # NOTE: the KB crop below is disabled by the 'and False'; depth is a
+        # numpy array at this point, so depth.crop(...) would fail if re-enabled.
+        if self.do_kb_crop and False:
+            height = image.height
+            width = image.width
+            top_margin = int(height - 352)
+            left_margin = int((width - 1216) / 2)
+            depth = depth.crop(
+                (left_margin, top_margin, left_margin + 1216, top_margin + 352))
+            image = image.crop(
+                (left_margin, top_margin, left_margin + 1216, top_margin + 352))
+            # uv = uv[:, top_margin:top_margin + 352, left_margin:left_margin + 1216]
+
+        image = np.asarray(image, dtype=np.float32) / 255.0
+        # depth = np.asarray(depth, dtype=np.uint16) /1.
+ depth = depth[..., None] + sample = dict(image=image, depth=depth) + + # return sample + sample = self.transform(sample) + + if idx == 0: + print(sample["image"].shape) + + return sample + + def __len__(self): + return len(self.image_files) + + +def get_vkitti_loader(data_dir_root, batch_size=1, **kwargs): + dataset = VKITTI(data_dir_root) + return DataLoader(dataset, batch_size, **kwargs) + + +if __name__ == "__main__": + loader = get_vkitti_loader( + data_dir_root="/home/bhatsf/shortcuts/datasets/vkitti_test") + print("Total files", len(loader.dataset)) + for i, sample in enumerate(loader): + print(sample["image"].shape) + print(sample["depth"].shape) + print(sample["dataset"]) + print(sample['depth'].min(), sample['depth'].max()) + if i > 5: + break diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/data/vkitti2.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/data/vkitti2.py new file mode 100644 index 0000000000000000000000000000000000000000..9bcfb0414b7f3f21859f30ae34bd71689516a3e7 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/data/vkitti2.py @@ -0,0 +1,187 @@ +# MIT License + +# Copyright (c) 2022 Intelligent Systems Lab Org + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
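The do_kb_crop branches in this loader and in vkitti2.py below apply the standard KITTI benchmark crop: a fixed 352x1216 window anchored to the bottom edge and centred horizontally. Spelled out for a 375x1242 frame (the usual KITTI resolution, used here only as an example):

    # KB crop margins for a 375x1242 frame
    height, width = 375, 1242
    top_margin = int(height - 352)         # 23 -> keep rows 23..374 (bottom-anchored)
    left_margin = int((width - 1216) / 2)  # 13 -> keep cols 13..1228 (centred)

    # PIL-style (left, upper, right, lower) box, as used by image.crop(...) above
    box = (left_margin, top_margin, left_margin + 1216, top_margin + 352)
    print(box)  # (13, 23, 1229, 375)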
+ +# File author: Shariq Farooq Bhat + +import os + +import cv2 +import numpy as np +import torch +from PIL import Image +from torch.utils.data import DataLoader, Dataset +from torchvision import transforms + + +class ToTensor(object): + def __init__(self): + # self.normalize = transforms.Normalize( + # mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) + self.normalize = lambda x: x + # self.resize = transforms.Resize((375, 1242)) + + def __call__(self, sample): + image, depth = sample['image'], sample['depth'] + + image = self.to_tensor(image) + image = self.normalize(image) + depth = self.to_tensor(depth) + + # image = self.resize(image) + + return {'image': image, 'depth': depth, 'dataset': "vkitti"} + + def to_tensor(self, pic): + + if isinstance(pic, np.ndarray): + img = torch.from_numpy(pic.transpose((2, 0, 1))) + return img + + # # handle PIL Image + if pic.mode == 'I': + img = torch.from_numpy(np.array(pic, np.int32, copy=False)) + elif pic.mode == 'I;16': + img = torch.from_numpy(np.array(pic, np.int16, copy=False)) + else: + img = torch.ByteTensor( + torch.ByteStorage.from_buffer(pic.tobytes())) + # PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK + if pic.mode == 'YCbCr': + nchannel = 3 + elif pic.mode == 'I;16': + nchannel = 1 + else: + nchannel = len(pic.mode) + img = img.view(pic.size[1], pic.size[0], nchannel) + + img = img.transpose(0, 1).transpose(0, 2).contiguous() + if isinstance(img, torch.ByteTensor): + return img.float() + else: + return img + + +class VKITTI2(Dataset): + def __init__(self, data_dir_root, do_kb_crop=True, split="test"): + import glob + + # image paths are of the form /rgb///frames//Camera<0,1>/rgb_{}.jpg + self.image_files = glob.glob(os.path.join( + data_dir_root, "rgb", "**", "frames", "rgb", "Camera_0", '*.jpg'), recursive=True) + self.depth_files = [r.replace("/rgb/", "/depth/").replace( + "rgb_", "depth_").replace(".jpg", ".png") for r in self.image_files] + self.do_kb_crop = True + self.transform = ToTensor() + + # If train test split is not created, then create one. + # Split is such that 8% of the frames from each scene are used for testing. 
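+ # NOTE (editorial, illustrative counts only): for a scene with, say, 500 Camera_0 frames, the shuffle below sends int(500 * 0.92) = 460 paths to train.txt and the remaining 40 to test.txt, so every scene contributes to both splits.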
+ if not os.path.exists(os.path.join(data_dir_root, "train.txt")): + import random + scenes = set([os.path.basename(os.path.dirname( + os.path.dirname(os.path.dirname(f)))) for f in self.image_files]) + train_files = [] + test_files = [] + for scene in scenes: + scene_files = [f for f in self.image_files if os.path.basename( + os.path.dirname(os.path.dirname(os.path.dirname(f)))) == scene] + random.shuffle(scene_files) + train_files.extend(scene_files[:int(len(scene_files) * 0.92)]) + test_files.extend(scene_files[int(len(scene_files) * 0.92):]) + with open(os.path.join(data_dir_root, "train.txt"), "w") as f: + f.write("\n".join(train_files)) + with open(os.path.join(data_dir_root, "test.txt"), "w") as f: + f.write("\n".join(test_files)) + + if split == "train": + with open(os.path.join(data_dir_root, "train.txt"), "r") as f: + self.image_files = f.read().splitlines() + self.depth_files = [r.replace("/rgb/", "/depth/").replace( + "rgb_", "depth_").replace(".jpg", ".png") for r in self.image_files] + elif split == "test": + with open(os.path.join(data_dir_root, "test.txt"), "r") as f: + self.image_files = f.read().splitlines() + self.depth_files = [r.replace("/rgb/", "/depth/").replace( + "rgb_", "depth_").replace(".jpg", ".png") for r in self.image_files] + + def __getitem__(self, idx): + image_path = self.image_files[idx] + depth_path = self.depth_files[idx] + + image = Image.open(image_path) + # depth = Image.open(depth_path) + depth = cv2.imread(depth_path, cv2.IMREAD_ANYCOLOR | + cv2.IMREAD_ANYDEPTH) / 100.0 # cm to m + depth = Image.fromarray(depth) + # print("dpeth min max", depth.min(), depth.max()) + + # print(np.shape(image)) + # print(np.shape(depth)) + + if self.do_kb_crop: + if idx == 0: + print("Using KB input crop") + height = image.height + width = image.width + top_margin = int(height - 352) + left_margin = int((width - 1216) / 2) + depth = depth.crop( + (left_margin, top_margin, left_margin + 1216, top_margin + 352)) + image = image.crop( + (left_margin, top_margin, left_margin + 1216, top_margin + 352)) + # uv = uv[:, top_margin:top_margin + 352, left_margin:left_margin + 1216] + + image = np.asarray(image, dtype=np.float32) / 255.0 + # depth = np.asarray(depth, dtype=np.uint16) /1. + depth = np.asarray(depth, dtype=np.float32) / 1. 
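+ # NOTE (editorial, not in the original file): depth is in metres at this point (converted from centimetres when read above); the next statement marks anything beyond 80 m as invalid (-1), consistent with the usual KITTI-style evaluation cap (assumed rationale, not stated in this patch).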
+ depth[depth > 80] = -1 + + depth = depth[..., None] + sample = dict(image=image, depth=depth) + + # return sample + sample = self.transform(sample) + + if idx == 0: + print(sample["image"].shape) + + return sample + + def __len__(self): + return len(self.image_files) + + +def get_vkitti2_loader(data_dir_root, batch_size=1, **kwargs): + dataset = VKITTI2(data_dir_root) + return DataLoader(dataset, batch_size, **kwargs) + + +if __name__ == "__main__": + loader = get_vkitti2_loader( + data_dir_root="/home/bhatsf/shortcuts/datasets/vkitti2") + print("Total files", len(loader.dataset)) + for i, sample in enumerate(loader): + print(sample["image"].shape) + print(sample["depth"].shape) + print(sample["dataset"]) + print(sample['depth'].min(), sample['depth'].max()) + if i > 5: + break diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/models/__init__.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5f2668792389157609abb2a0846fb620e7d67eb9 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/models/__init__.py @@ -0,0 +1,24 @@ +# MIT License + +# Copyright (c) 2022 Intelligent Systems Lab Org + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +# File author: Shariq Farooq Bhat + diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/models/base_models/__init__.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/models/base_models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5f2668792389157609abb2a0846fb620e7d67eb9 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/models/base_models/__init__.py @@ -0,0 +1,24 @@ +# MIT License + +# Copyright (c) 2022 Intelligent Systems Lab Org + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. 
+ +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +# File author: Shariq Farooq Bhat + diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/models/base_models/midas.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/models/base_models/midas.py new file mode 100644 index 0000000000000000000000000000000000000000..e26f8589502f8298bde8820262083f54b254f70e --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/models/base_models/midas.py @@ -0,0 +1,377 @@ +# MIT License + +# Copyright (c) 2022 Intelligent Systems Lab Org + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +# File author: Shariq Farooq Bhat + +import torch +import torch.nn as nn +import numpy as np +from torchvision.transforms import Normalize + + +def denormalize(x): + """Reverses the imagenet normalization applied to the input. + + Args: + x (torch.Tensor - shape(N,3,H,W)): input tensor + + Returns: + torch.Tensor - shape(N,3,H,W): Denormalized input + """ + mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1).to(x.device) + std = torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1).to(x.device) + return x * std + mean + +def get_activation(name, bank): + def hook(model, input, output): + bank[name] = output + return hook + + +class Resize(object): + """Resize sample to given size (width, height). + """ + + def __init__( + self, + width, + height, + resize_target=True, + keep_aspect_ratio=False, + ensure_multiple_of=1, + resize_method="lower_bound", + ): + """Init. + Args: + width (int): desired output width + height (int): desired output height + resize_target (bool, optional): + True: Resize the full sample (image, mask, target). + False: Resize image only. + Defaults to True. + keep_aspect_ratio (bool, optional): + True: Keep the aspect ratio of the input sample. + Output sample might not have the given width and height, and + resize behaviour depends on the parameter 'resize_method'. + Defaults to False. 
+ ensure_multiple_of (int, optional): + Output width and height is constrained to be multiple of this parameter. + Defaults to 1. + resize_method (str, optional): + "lower_bound": Output will be at least as large as the given size. + "upper_bound": Output will be at max as large as the given size. (Output size might be smaller than given size.) + "minimal": Scale as least as possible. (Output size might be smaller than given size.) + Defaults to "lower_bound". + """ + print("Params passed to Resize transform:") + print("\twidth: ", width) + print("\theight: ", height) + print("\tresize_target: ", resize_target) + print("\tkeep_aspect_ratio: ", keep_aspect_ratio) + print("\tensure_multiple_of: ", ensure_multiple_of) + print("\tresize_method: ", resize_method) + + self.__width = width + self.__height = height + + self.__keep_aspect_ratio = keep_aspect_ratio + self.__multiple_of = ensure_multiple_of + self.__resize_method = resize_method + + def constrain_to_multiple_of(self, x, min_val=0, max_val=None): + y = (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int) + + if max_val is not None and y > max_val: + y = (np.floor(x / self.__multiple_of) + * self.__multiple_of).astype(int) + + if y < min_val: + y = (np.ceil(x / self.__multiple_of) + * self.__multiple_of).astype(int) + + return y + + def get_size(self, width, height): + # determine new height and width + scale_height = self.__height / height + scale_width = self.__width / width + + if self.__keep_aspect_ratio: + if self.__resize_method == "lower_bound": + # scale such that output size is lower bound + if scale_width > scale_height: + # fit width + scale_height = scale_width + else: + # fit height + scale_width = scale_height + elif self.__resize_method == "upper_bound": + # scale such that output size is upper bound + if scale_width < scale_height: + # fit width + scale_height = scale_width + else: + # fit height + scale_width = scale_height + elif self.__resize_method == "minimal": + # scale as least as possbile + if abs(1 - scale_width) < abs(1 - scale_height): + # fit width + scale_height = scale_width + else: + # fit height + scale_width = scale_height + else: + raise ValueError( + f"resize_method {self.__resize_method} not implemented" + ) + + if self.__resize_method == "lower_bound": + new_height = self.constrain_to_multiple_of( + scale_height * height, min_val=self.__height + ) + new_width = self.constrain_to_multiple_of( + scale_width * width, min_val=self.__width + ) + elif self.__resize_method == "upper_bound": + new_height = self.constrain_to_multiple_of( + scale_height * height, max_val=self.__height + ) + new_width = self.constrain_to_multiple_of( + scale_width * width, max_val=self.__width + ) + elif self.__resize_method == "minimal": + new_height = self.constrain_to_multiple_of(scale_height * height) + new_width = self.constrain_to_multiple_of(scale_width * width) + else: + raise ValueError( + f"resize_method {self.__resize_method} not implemented") + + return (new_width, new_height) + + def __call__(self, x): + width, height = self.get_size(*x.shape[-2:][::-1]) + return nn.functional.interpolate(x, (height, width), mode='bilinear', align_corners=True) + +class PrepForMidas(object): + def __init__(self, resize_mode="minimal", keep_aspect_ratio=True, img_size=384, do_resize=True): + if isinstance(img_size, int): + img_size = (img_size, img_size) + net_h, net_w = img_size + self.normalization = Normalize( + mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) + self.resizer = Resize(net_w, net_h, 
keep_aspect_ratio=keep_aspect_ratio, ensure_multiple_of=32, resize_method=resize_mode) \ + if do_resize else nn.Identity() + + def __call__(self, x): + return self.normalization(self.resizer(x)) + + +class MidasCore(nn.Module): + def __init__(self, midas, trainable=False, fetch_features=True, layer_names=('out_conv', 'l4_rn', 'r4', 'r3', 'r2', 'r1'), freeze_bn=False, keep_aspect_ratio=True, + img_size=384, **kwargs): + """Midas Base model used for multi-scale feature extraction. + + Args: + midas (torch.nn.Module): Midas model. + trainable (bool, optional): Train midas model. Defaults to False. + fetch_features (bool, optional): Extract multi-scale features. Defaults to True. + layer_names (tuple, optional): Layers used for feature extraction. Order = (head output features, last layer features, ...decoder features). Defaults to ('out_conv', 'l4_rn', 'r4', 'r3', 'r2', 'r1'). + freeze_bn (bool, optional): Freeze BatchNorm. Generally results in better finetuning performance. Defaults to False. + keep_aspect_ratio (bool, optional): Keep the aspect ratio of input images while resizing. Defaults to True. + img_size (int, tuple, optional): Input resolution. Defaults to 384. + """ + super().__init__() + self.core = midas + self.output_channels = None + self.core_out = {} + self.trainable = trainable + self.fetch_features = fetch_features + # midas.scratch.output_conv = nn.Identity() + self.handles = [] + # self.layer_names = ['out_conv','l4_rn', 'r4', 'r3', 'r2', 'r1'] + self.layer_names = layer_names + + self.set_trainable(trainable) + self.set_fetch_features(fetch_features) + + self.prep = PrepForMidas(keep_aspect_ratio=keep_aspect_ratio, + img_size=img_size, do_resize=kwargs.get('do_resize', True)) + + if freeze_bn: + self.freeze_bn() + + def set_trainable(self, trainable): + self.trainable = trainable + if trainable: + self.unfreeze() + else: + self.freeze() + return self + + def set_fetch_features(self, fetch_features): + self.fetch_features = fetch_features + if fetch_features: + if len(self.handles) == 0: + self.attach_hooks(self.core) + else: + self.remove_hooks() + return self + + def freeze(self): + for p in self.parameters(): + p.requires_grad = False + self.trainable = False + return self + + def unfreeze(self): + for p in self.parameters(): + p.requires_grad = True + self.trainable = True + return self + + def freeze_bn(self): + for m in self.modules(): + if isinstance(m, nn.BatchNorm2d): + m.eval() + return self + + def forward(self, x, denorm=False, return_rel_depth=False): + with torch.no_grad(): + if denorm: + x = denormalize(x) + x = self.prep(x) + # print("Shape after prep: ", x.shape) + + with torch.set_grad_enabled(self.trainable): + + # print("Input size to Midascore", x.shape) + rel_depth = self.core(x) + # print("Output from midas shape", rel_depth.shape) + if not self.fetch_features: + return rel_depth + out = [self.core_out[k] for k in self.layer_names] + + if return_rel_depth: + return rel_depth, out + return out + + def get_rel_pos_params(self): + for name, p in self.core.pretrained.named_parameters(): + if "relative_position" in name: + yield p + + def get_enc_params_except_rel_pos(self): + for name, p in self.core.pretrained.named_parameters(): + if "relative_position" not in name: + yield p + + def freeze_encoder(self, freeze_rel_pos=False): + if freeze_rel_pos: + for p in self.core.pretrained.parameters(): + p.requires_grad = False + else: + for p in self.get_enc_params_except_rel_pos(): + p.requires_grad = False + return self + + def attach_hooks(self, midas): + if 
len(self.handles) > 0: + self.remove_hooks() + if "out_conv" in self.layer_names: + self.handles.append(list(midas.scratch.output_conv.children())[ + 3].register_forward_hook(get_activation("out_conv", self.core_out))) + if "r4" in self.layer_names: + self.handles.append(midas.scratch.refinenet4.register_forward_hook( + get_activation("r4", self.core_out))) + if "r3" in self.layer_names: + self.handles.append(midas.scratch.refinenet3.register_forward_hook( + get_activation("r3", self.core_out))) + if "r2" in self.layer_names: + self.handles.append(midas.scratch.refinenet2.register_forward_hook( + get_activation("r2", self.core_out))) + if "r1" in self.layer_names: + self.handles.append(midas.scratch.refinenet1.register_forward_hook( + get_activation("r1", self.core_out))) + if "l4_rn" in self.layer_names: + self.handles.append(midas.scratch.layer4_rn.register_forward_hook( + get_activation("l4_rn", self.core_out))) + + return self + + def remove_hooks(self): + for h in self.handles: + h.remove() + return self + + def __del__(self): + self.remove_hooks() + + def set_output_channels(self, model_type): + self.output_channels = MIDAS_SETTINGS[model_type] + + @staticmethod + def build(midas_model_type="DPT_BEiT_L_384", train_midas=False, use_pretrained_midas=True, fetch_features=False, freeze_bn=True, force_keep_ar=False, force_reload=False, **kwargs): + if midas_model_type not in MIDAS_SETTINGS: + raise ValueError( + f"Invalid model type: {midas_model_type}. Must be one of {list(MIDAS_SETTINGS.keys())}") + if "img_size" in kwargs: + kwargs = MidasCore.parse_img_size(kwargs) + img_size = kwargs.pop("img_size", [384, 384]) + print("img_size", img_size) + midas = torch.hub.load("intel-isl/MiDaS", midas_model_type, + pretrained=use_pretrained_midas, force_reload=force_reload) + kwargs.update({'keep_aspect_ratio': force_keep_ar}) + midas_core = MidasCore(midas, trainable=train_midas, fetch_features=fetch_features, + freeze_bn=freeze_bn, img_size=img_size, **kwargs) + midas_core.set_output_channels(midas_model_type) + return midas_core + + @staticmethod + def build_from_config(config): + return MidasCore.build(**config) + + @staticmethod + def parse_img_size(config): + assert 'img_size' in config + if isinstance(config['img_size'], str): + assert "," in config['img_size'], "img_size should be a string with comma separated img_size=H,W" + config['img_size'] = list(map(int, config['img_size'].split(","))) + assert len( + config['img_size']) == 2, "img_size should be a string with comma separated img_size=H,W" + elif isinstance(config['img_size'], int): + config['img_size'] = [config['img_size'], config['img_size']] + else: + assert isinstance(config['img_size'], list) and len( + config['img_size']) == 2, "img_size should be a list of H,W" + return config + + +nchannels2models = { + tuple([256]*5): ["DPT_BEiT_L_384", "DPT_BEiT_L_512", "DPT_BEiT_B_384", "DPT_SwinV2_L_384", "DPT_SwinV2_B_384", "DPT_SwinV2_T_256", "DPT_Large", "DPT_Hybrid"], + (512, 256, 128, 64, 64): ["MiDaS_small"] +} + +# Model name to number of output channels +MIDAS_SETTINGS = {m: k for k, v in nchannels2models.items() + for m in v + } diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/models/builder.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/models/builder.py new file mode 100644 index 0000000000000000000000000000000000000000..4363d59689158912a412feb5c296b4a72bc2c608 --- /dev/null +++ 
b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/models/builder.py @@ -0,0 +1,51 @@ +# MIT License + +# Copyright (c) 2022 Intelligent Systems Lab Org + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +# File author: Shariq Farooq Bhat + +from importlib import import_module +from zoedepth.models.depth_model import DepthModel + +def build_model(config) -> DepthModel: + """Builds a model from a config. The model is specified by the model name and version in the config. The model is then constructed using the build_from_config function of the model interface. + This function should be used to construct models for training and evaluation. + + Args: + config (dict): Config dict. Config is constructed in utils/config.py. Each model has its own config file(s) saved in its root model folder. + + Returns: + torch.nn.Module: Model corresponding to name and version as specified in config + """ + module_name = f"zoedepth.models.{config.model}" + try: + module = import_module(module_name) + except ModuleNotFoundError as e: + # print the original error message + print(e) + raise ValueError( + f"Model {config.model} not found. 
Refer above error for details.") from e + try: + get_version = getattr(module, "get_version") + except AttributeError as e: + raise ValueError( + f"Model {config.model} has no get_version function.") from e + return get_version(config.version_name).build_from_config(config) diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/models/depth_model.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/models/depth_model.py new file mode 100644 index 0000000000000000000000000000000000000000..fc421c108ea3928c9add62b4c190500d9bd4eda1 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/models/depth_model.py @@ -0,0 +1,152 @@ +# MIT License + +# Copyright (c) 2022 Intelligent Systems Lab Org + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +# File author: Shariq Farooq Bhat + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from torchvision import transforms +import PIL.Image +from PIL import Image +from typing import Union + + +class DepthModel(nn.Module): + def __init__(self): + super().__init__() + self.device = 'cpu' + + def to(self, device) -> nn.Module: + self.device = device + return super().to(device) + + def forward(self, x, *args, **kwargs): + raise NotImplementedError + + def _infer(self, x: torch.Tensor): + """ + Inference interface for the model + Args: + x (torch.Tensor): input tensor of shape (b, c, h, w) + Returns: + torch.Tensor: output tensor of shape (b, 1, h, w) + """ + return self(x)['metric_depth'] + + def _infer_with_pad_aug(self, x: torch.Tensor, pad_input: bool=True, fh: float=3, fw: float=3, upsampling_mode: str='bicubic', padding_mode="reflect", **kwargs) -> torch.Tensor: + """ + Inference interface for the model with padding augmentation + Padding augmentation fixes the boundary artifacts in the output depth map. + Boundary artifacts are sometimes caused by the fact that the model is trained on NYU raw dataset which has a black or white border around the image. + This augmentation pads the input image and crops the prediction back to the original size / view. + + Note: This augmentation is not required for the models trained with 'avoid_boundary'=True. + Args: + x (torch.Tensor): input tensor of shape (b, c, h, w) + pad_input (bool, optional): whether to pad the input or not. Defaults to True. + fh (float, optional): height padding factor. The padding is calculated as sqrt(h/2) * fh. Defaults to 3. 
+ fw (float, optional): width padding factor. The padding is calculated as sqrt(w/2) * fw. Defaults to 3. + upsampling_mode (str, optional): upsampling mode. Defaults to 'bicubic'. + padding_mode (str, optional): padding mode. Defaults to "reflect". + Returns: + torch.Tensor: output tensor of shape (b, 1, h, w) + """ + # assert x is nchw and c = 3 + assert x.dim() == 4, "x must be 4 dimensional, got {}".format(x.dim()) + assert x.shape[1] == 3, "x must have 3 channels, got {}".format(x.shape[1]) + + if pad_input: + assert fh > 0 or fw > 0, "at least one of fh and fw must be greater than 0" + pad_h = int(np.sqrt(x.shape[2]/2) * fh) + pad_w = int(np.sqrt(x.shape[3]/2) * fw) + padding = [pad_w, pad_w] + if pad_h > 0: + padding += [pad_h, pad_h] + + x = F.pad(x, padding, mode=padding_mode, **kwargs) + out = self._infer(x) + if out.shape[-2:] != x.shape[-2:]: + out = F.interpolate(out, size=(x.shape[2], x.shape[3]), mode=upsampling_mode, align_corners=False) + if pad_input: + # crop to the original size, handling the case where pad_h and pad_w are 0 + if pad_h > 0: + out = out[:, :, pad_h:-pad_h,:] + if pad_w > 0: + out = out[:, :, :, pad_w:-pad_w] + return out + + def infer_with_flip_aug(self, x, pad_input: bool=True, **kwargs) -> torch.Tensor: + """ + Inference interface for the model with horizontal flip augmentation + Horizontal flip augmentation improves the accuracy of the model by averaging the output of the model with and without horizontal flip. + Args: + x (torch.Tensor): input tensor of shape (b, c, h, w) + pad_input (bool, optional): whether to use padding augmentation. Defaults to True. + Returns: + torch.Tensor: output tensor of shape (b, 1, h, w) + """ + # infer with horizontal flip and average + out = self._infer_with_pad_aug(x, pad_input=pad_input, **kwargs) + out_flip = self._infer_with_pad_aug(torch.flip(x, dims=[3]), pad_input=pad_input, **kwargs) + out = (out + torch.flip(out_flip, dims=[3])) / 2 + return out + + def infer(self, x, pad_input: bool=True, with_flip_aug: bool=True, **kwargs) -> torch.Tensor: + """ + Inference interface for the model + Args: + x (torch.Tensor): input tensor of shape (b, c, h, w) + pad_input (bool, optional): whether to use padding augmentation. Defaults to True. + with_flip_aug (bool, optional): whether to use horizontal flip augmentation. Defaults to True. + Returns: + torch.Tensor: output tensor of shape (b, 1, h, w) + """ + if with_flip_aug: + return self.infer_with_flip_aug(x, pad_input=pad_input, **kwargs) + else: + return self._infer_with_pad_aug(x, pad_input=pad_input, **kwargs) + + @torch.no_grad() + def infer_pil(self, pil_img, pad_input: bool=True, with_flip_aug: bool=True, output_type: str="numpy", **kwargs) -> Union[np.ndarray, PIL.Image.Image, torch.Tensor]: + """ + Inference interface for the model for PIL image + Args: + pil_img (PIL.Image.Image): input PIL image + pad_input (bool, optional): whether to use padding augmentation. Defaults to True. + with_flip_aug (bool, optional): whether to use horizontal flip augmentation. Defaults to True. + output_type (str, optional): output type. Supported values are 'numpy', 'pil' and 'tensor'. Defaults to "numpy".
+ """ + x = transforms.ToTensor()(pil_img).unsqueeze(0).to(self.device) + out_tensor = self.infer(x, pad_input=pad_input, with_flip_aug=with_flip_aug, **kwargs) + if output_type == "numpy": + return out_tensor.squeeze().cpu().numpy() + elif output_type == "pil": + # uint16 is required for depth pil image + out_16bit_numpy = (out_tensor.squeeze().cpu().numpy()*256).astype(np.uint16) + return Image.fromarray(out_16bit_numpy) + elif output_type == "tensor": + return out_tensor.squeeze().cpu() + else: + raise ValueError(f"output_type {output_type} not supported. Supported values are 'numpy', 'pil' and 'tensor'") + \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/models/layers/attractor.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/models/layers/attractor.py new file mode 100644 index 0000000000000000000000000000000000000000..2a8efe645adea1d88a12e2ac5cc6bb2a251eef9d --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/models/layers/attractor.py @@ -0,0 +1,208 @@ +# MIT License + +# Copyright (c) 2022 Intelligent Systems Lab Org + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +# File author: Shariq Farooq Bhat + +import torch +import torch.nn as nn + + +@torch.jit.script +def exp_attractor(dx, alpha: float = 300, gamma: int = 2): + """Exponential attractor: dc = exp(-alpha*|dx|^gamma) * dx , where dx = a - c, a = attractor point, c = bin center, dc = shift in bin centermmary for exp_attractor + + Args: + dx (torch.Tensor): The difference tensor dx = Ai - Cj, where Ai is the attractor point and Cj is the bin center. + alpha (float, optional): Proportional Attractor strength. Determines the absolute strength. Lower alpha = greater attraction. Defaults to 300. + gamma (int, optional): Exponential Attractor strength. Determines the "region of influence" and indirectly number of bin centers affected. Lower gamma = farther reach. Defaults to 2. + + Returns: + torch.Tensor : Delta shifts - dc; New bin centers = Old bin centers + dc + """ + return torch.exp(-alpha*(torch.abs(dx)**gamma)) * (dx) + + +@torch.jit.script +def inv_attractor(dx, alpha: float = 300, gamma: int = 2): + """Inverse attractor: dc = dx / (1 + alpha*dx^gamma), where dx = a - c, a = attractor point, c = bin center, dc = shift in bin center + This is the default one according to the accompanying paper. 
+ + Args: + dx (torch.Tensor): The difference tensor dx = Ai - Cj, where Ai is the attractor point and Cj is the bin center. + alpha (float, optional): Proportional Attractor strength. Determines the absolute strength. Lower alpha = greater attraction. Defaults to 300. + gamma (int, optional): Exponential Attractor strength. Determines the "region of influence" and indirectly number of bin centers affected. Lower gamma = farther reach. Defaults to 2. + + Returns: + torch.Tensor: Delta shifts - dc; New bin centers = Old bin centers + dc + """ + return dx.div(1+alpha*dx.pow(gamma)) + + +class AttractorLayer(nn.Module): + def __init__(self, in_features, n_bins, n_attractors=16, mlp_dim=128, min_depth=1e-3, max_depth=10, + alpha=300, gamma=2, kind='sum', attractor_type='exp', memory_efficient=False): + """ + Attractor layer for bin centers. Bin centers are bounded on the interval (min_depth, max_depth) + """ + super().__init__() + + self.n_attractors = n_attractors + self.n_bins = n_bins + self.min_depth = min_depth + self.max_depth = max_depth + self.alpha = alpha + self.gamma = gamma + self.kind = kind + self.attractor_type = attractor_type + self.memory_efficient = memory_efficient + + self._net = nn.Sequential( + nn.Conv2d(in_features, mlp_dim, 1, 1, 0), + nn.ReLU(inplace=True), + nn.Conv2d(mlp_dim, n_attractors*2, 1, 1, 0), # x2 for linear norm + nn.ReLU(inplace=True) + ) + + def forward(self, x, b_prev, prev_b_embedding=None, interpolate=True, is_for_query=False): + """ + Args: + x (torch.Tensor) : feature block; shape - n, c, h, w + b_prev (torch.Tensor) : previous bin centers normed; shape - n, prev_nbins, h, w + + Returns: + tuple(torch.Tensor,torch.Tensor) : new bin centers normed and scaled; shape - n, nbins, h, w + """ + if prev_b_embedding is not None: + if interpolate: + prev_b_embedding = nn.functional.interpolate( + prev_b_embedding, x.shape[-2:], mode='bilinear', align_corners=True) + x = x + prev_b_embedding + + A = self._net(x) + eps = 1e-3 + A = A + eps + n, c, h, w = A.shape + A = A.view(n, self.n_attractors, 2, h, w) + A_normed = A / A.sum(dim=2, keepdim=True) # n, a, 2, h, w + A_normed = A[:, :, 0, ...] # n, na, h, w + + b_prev = nn.functional.interpolate( + b_prev, (h, w), mode='bilinear', align_corners=True) + b_centers = b_prev + + if self.attractor_type == 'exp': + dist = exp_attractor + else: + dist = inv_attractor + + if not self.memory_efficient: + func = {'mean': torch.mean, 'sum': torch.sum}[self.kind] + # .shape N, nbins, h, w + delta_c = func(dist(A_normed.unsqueeze( + 2) - b_centers.unsqueeze(1)), dim=1) + else: + delta_c = torch.zeros_like(b_centers, device=b_centers.device) + for i in range(self.n_attractors): + # .shape N, nbins, h, w + delta_c += dist(A_normed[:, i, ...].unsqueeze(1) - b_centers) + + if self.kind == 'mean': + delta_c = delta_c / self.n_attractors + + b_new_centers = b_centers + delta_c + B_centers = (self.max_depth - self.min_depth) * \ + b_new_centers + self.min_depth + B_centers, _ = torch.sort(B_centers, dim=1) + B_centers = torch.clip(B_centers, self.min_depth, self.max_depth) + return b_new_centers, B_centers + + +class AttractorLayerUnnormed(nn.Module): + def __init__(self, in_features, n_bins, n_attractors=16, mlp_dim=128, min_depth=1e-3, max_depth=10, + alpha=300, gamma=2, kind='sum', attractor_type='exp', memory_efficient=False): + """ + Attractor layer for bin centers. 
Bin centers are unbounded + """ + super().__init__() + + self.n_attractors = n_attractors + self.n_bins = n_bins + self.min_depth = min_depth + self.max_depth = max_depth + self.alpha = alpha + self.gamma = gamma + self.kind = kind + self.attractor_type = attractor_type + self.memory_efficient = memory_efficient + + self._net = nn.Sequential( + nn.Conv2d(in_features, mlp_dim, 1, 1, 0), + nn.ReLU(inplace=True), + nn.Conv2d(mlp_dim, n_attractors, 1, 1, 0), + nn.Softplus() + ) + + def forward(self, x, b_prev, prev_b_embedding=None, interpolate=True, is_for_query=False): + """ + Args: + x (torch.Tensor) : feature block; shape - n, c, h, w + b_prev (torch.Tensor) : previous bin centers normed; shape - n, prev_nbins, h, w + + Returns: + tuple(torch.Tensor,torch.Tensor) : new bin centers unbounded; shape - n, nbins, h, w. Two outputs just to keep the API consistent with the normed version + """ + if prev_b_embedding is not None: + if interpolate: + prev_b_embedding = nn.functional.interpolate( + prev_b_embedding, x.shape[-2:], mode='bilinear', align_corners=True) + x = x + prev_b_embedding + + A = self._net(x) + n, c, h, w = A.shape + + b_prev = nn.functional.interpolate( + b_prev, (h, w), mode='bilinear', align_corners=True) + b_centers = b_prev + + if self.attractor_type == 'exp': + dist = exp_attractor + else: + dist = inv_attractor + + if not self.memory_efficient: + func = {'mean': torch.mean, 'sum': torch.sum}[self.kind] + # .shape N, nbins, h, w + delta_c = func( + dist(A.unsqueeze(2) - b_centers.unsqueeze(1)), dim=1) + else: + delta_c = torch.zeros_like(b_centers, device=b_centers.device) + for i in range(self.n_attractors): + delta_c += dist(A[:, i, ...].unsqueeze(1) - + b_centers) # .shape N, nbins, h, w + + if self.kind == 'mean': + delta_c = delta_c / self.n_attractors + + b_new_centers = b_centers + delta_c + B_centers = b_new_centers + + return b_new_centers, B_centers diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/models/layers/dist_layers.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/models/layers/dist_layers.py new file mode 100644 index 0000000000000000000000000000000000000000..3208405dfb78fdfc28d5765e5a6d5dbe31967a23 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/models/layers/dist_layers.py @@ -0,0 +1,121 @@ +# MIT License + +# Copyright (c) 2022 Intelligent Systems Lab Org + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
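+ # NOTE (editorial, not in the original file): this module implements the log-binomial output head used by ZoeDepth. For K classes, per-pixel probability p and temperature t, LogBinomial.forward() evaluates a softmax over k of (log C(K-1, k) + k*log(p) + (K-1-k)*log(1-p)) / t, with log C(n, k) approximated via Stirling's formula in log_binom(); ConditionalLogBinomial predicts p and t from the concatenated feature and condition maps before applying the same transform.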
+ +# File author: Shariq Farooq Bhat + +import torch +import torch.nn as nn + + +def log_binom(n, k, eps=1e-7): + """ log(nCk) using stirling approximation """ + n = n + eps + k = k + eps + return n * torch.log(n) - k * torch.log(k) - (n-k) * torch.log(n-k+eps) + + +class LogBinomial(nn.Module): + def __init__(self, n_classes=256, act=torch.softmax): + """Compute log binomial distribution for n_classes + + Args: + n_classes (int, optional): number of output classes. Defaults to 256. + """ + super().__init__() + self.K = n_classes + self.act = act + self.register_buffer('k_idx', torch.arange( + 0, n_classes).view(1, -1, 1, 1)) + self.register_buffer('K_minus_1', torch.Tensor( + [self.K-1]).view(1, -1, 1, 1)) + + def forward(self, x, t=1., eps=1e-4): + """Compute log binomial distribution for x + + Args: + x (torch.Tensor - NCHW): probabilities + t (float, torch.Tensor - NCHW, optional): Temperature of distribution. Defaults to 1.. + eps (float, optional): Small number for numerical stability. Defaults to 1e-4. + + Returns: + torch.Tensor -NCHW: log binomial distribution logbinomial(p;t) + """ + if x.ndim == 3: + x = x.unsqueeze(1) # make it nchw + + one_minus_x = torch.clamp(1 - x, eps, 1) + x = torch.clamp(x, eps, 1) + y = log_binom(self.K_minus_1, self.k_idx) + self.k_idx * \ + torch.log(x) + (self.K - 1 - self.k_idx) * torch.log(one_minus_x) + return self.act(y/t, dim=1) + + +class ConditionalLogBinomial(nn.Module): + def __init__(self, in_features, condition_dim, n_classes=256, bottleneck_factor=2, p_eps=1e-4, max_temp=50, min_temp=1e-7, act=torch.softmax): + """Conditional Log Binomial distribution + + Args: + in_features (int): number of input channels in main feature + condition_dim (int): number of input channels in condition feature + n_classes (int, optional): Number of classes. Defaults to 256. + bottleneck_factor (int, optional): Hidden dim factor. Defaults to 2. + p_eps (float, optional): small eps value. Defaults to 1e-4. + max_temp (float, optional): Maximum temperature of output distribution. Defaults to 50. + min_temp (float, optional): Minimum temperature of output distribution. Defaults to 1e-7. + """ + super().__init__() + self.p_eps = p_eps + self.max_temp = max_temp + self.min_temp = min_temp + self.log_binomial_transform = LogBinomial(n_classes, act=act) + bottleneck = (in_features + condition_dim) // bottleneck_factor + self.mlp = nn.Sequential( + nn.Conv2d(in_features + condition_dim, bottleneck, + kernel_size=1, stride=1, padding=0), + nn.GELU(), + # 2 for p linear norm, 2 for t linear norm + nn.Conv2d(bottleneck, 2+2, kernel_size=1, stride=1, padding=0), + nn.Softplus() + ) + + def forward(self, x, cond): + """Forward pass + + Args: + x (torch.Tensor - NCHW): Main feature + cond (torch.Tensor - NCHW): condition feature + + Returns: + torch.Tensor: Output log binomial distribution + """ + pt = self.mlp(torch.concat((x, cond), dim=1)) + p, t = pt[:, :2, ...], pt[:, 2:, ...] + + p = p + self.p_eps + p = p[:, 0, ...] / (p[:, 0, ...] + p[:, 1, ...]) + + t = t + self.p_eps + t = t[:, 0, ...] / (t[:, 0, ...] 
+ t[:, 1, ...]) + t = t.unsqueeze(1) + t = (self.max_temp - self.min_temp) * t + self.min_temp + + return self.log_binomial_transform(p, t) diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/models/layers/localbins_layers.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/models/layers/localbins_layers.py new file mode 100644 index 0000000000000000000000000000000000000000..f94481605c3e6958ce50e73b2eb31d9f0c07dc67 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/models/layers/localbins_layers.py @@ -0,0 +1,169 @@ +# MIT License + +# Copyright (c) 2022 Intelligent Systems Lab Org + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +# File author: Shariq Farooq Bhat + +import torch +import torch.nn as nn + + +class SeedBinRegressor(nn.Module): + def __init__(self, in_features, n_bins=16, mlp_dim=256, min_depth=1e-3, max_depth=10): + """Bin center regressor network. Bin centers are bounded on (min_depth, max_depth) interval. + + Args: + in_features (int): input channels + n_bins (int, optional): Number of bin centers. Defaults to 16. + mlp_dim (int, optional): Hidden dimension. Defaults to 256. + min_depth (float, optional): Min depth value. Defaults to 1e-3. + max_depth (float, optional): Max depth value. Defaults to 10. + """ + super().__init__() + self.version = "1_1" + self.min_depth = min_depth + self.max_depth = max_depth + + self._net = nn.Sequential( + nn.Conv2d(in_features, mlp_dim, 1, 1, 0), + nn.ReLU(inplace=True), + nn.Conv2d(mlp_dim, n_bins, 1, 1, 0), + nn.ReLU(inplace=True) + ) + + def forward(self, x): + """ + Returns tensor of bin_width vectors (centers). One vector b for every pixel + """ + B = self._net(x) + eps = 1e-3 + B = B + eps + B_widths_normed = B / B.sum(dim=1, keepdim=True) + B_widths = (self.max_depth - self.min_depth) * \ + B_widths_normed # .shape NCHW + # pad has the form (left, right, top, bottom, front, back) + B_widths = nn.functional.pad( + B_widths, (0, 0, 0, 0, 1, 0), mode='constant', value=self.min_depth) + B_edges = torch.cumsum(B_widths, dim=1) # .shape NCHW + + B_centers = 0.5 * (B_edges[:, :-1, ...] + B_edges[:, 1:, ...]) + return B_widths_normed, B_centers + + +class SeedBinRegressorUnnormed(nn.Module): + def __init__(self, in_features, n_bins=16, mlp_dim=256, min_depth=1e-3, max_depth=10): + """Bin center regressor network. Bin centers are unbounded + + Args: + in_features (int): input channels + n_bins (int, optional): Number of bin centers. 
Defaults to 16. + mlp_dim (int, optional): Hidden dimension. Defaults to 256. + min_depth (float, optional): Not used. (for compatibility with SeedBinRegressor) + max_depth (float, optional): Not used. (for compatibility with SeedBinRegressor) + """ + super().__init__() + self.version = "1_1" + self._net = nn.Sequential( + nn.Conv2d(in_features, mlp_dim, 1, 1, 0), + nn.ReLU(inplace=True), + nn.Conv2d(mlp_dim, n_bins, 1, 1, 0), + nn.Softplus() + ) + + def forward(self, x): + """ + Returns tensor of bin_width vectors (centers). One vector b for every pixel + """ + B_centers = self._net(x) + return B_centers, B_centers + + +class Projector(nn.Module): + def __init__(self, in_features, out_features, mlp_dim=128): + """Projector MLP + + Args: + in_features (int): input channels + out_features (int): output channels + mlp_dim (int, optional): hidden dimension. Defaults to 128. + """ + super().__init__() + + self._net = nn.Sequential( + nn.Conv2d(in_features, mlp_dim, 1, 1, 0), + nn.ReLU(inplace=True), + nn.Conv2d(mlp_dim, out_features, 1, 1, 0), + ) + + def forward(self, x): + return self._net(x) + + + +class LinearSplitter(nn.Module): + def __init__(self, in_features, prev_nbins, split_factor=2, mlp_dim=128, min_depth=1e-3, max_depth=10): + super().__init__() + + self.prev_nbins = prev_nbins + self.split_factor = split_factor + self.min_depth = min_depth + self.max_depth = max_depth + + self._net = nn.Sequential( + nn.Conv2d(in_features, mlp_dim, 1, 1, 0), + nn.GELU(), + nn.Conv2d(mlp_dim, prev_nbins * split_factor, 1, 1, 0), + nn.ReLU() + ) + + def forward(self, x, b_prev, prev_b_embedding=None, interpolate=True, is_for_query=False): + """ + x : feature block; shape - n, c, h, w + b_prev : previous bin widths normed; shape - n, prev_nbins, h, w + """ + if prev_b_embedding is not None: + if interpolate: + prev_b_embedding = nn.functional.interpolate(prev_b_embedding, x.shape[-2:], mode='bilinear', align_corners=True) + x = x + prev_b_embedding + S = self._net(x) + eps = 1e-3 + S = S + eps + n, c, h, w = S.shape + S = S.view(n, self.prev_nbins, self.split_factor, h, w) + S_normed = S / S.sum(dim=2, keepdim=True) # fractional splits + + b_prev = nn.functional.interpolate(b_prev, (h,w), mode='bilinear', align_corners=True) + + + b_prev = b_prev / b_prev.sum(dim=1, keepdim=True) # renormalize for gurantees + # print(b_prev.shape, S_normed.shape) + # if is_for_query:(1).expand(-1, b_prev.size(0)//n, -1, -1, -1, -1).flatten(0,1) # TODO ? can replace all this with a single torch.repeat? + b = b_prev.unsqueeze(2) * S_normed + b = b.flatten(1,2) # .shape n, prev_nbins * split_factor, h, w + + # calculate bin centers for loss calculation + B_widths = (self.max_depth - self.min_depth) * b # .shape N, nprev * splitfactor, H, W + # pad has the form (left, right, top, bottom, front, back) + B_widths = nn.functional.pad(B_widths, (0,0,0,0,1,0), mode='constant', value=self.min_depth) + B_edges = torch.cumsum(B_widths, dim=1) # .shape NCHW + + B_centers = 0.5 * (B_edges[:, :-1, ...] 
+ B_edges[:,1:,...]) + return b, B_centers \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/models/layers/patch_transformer.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/models/layers/patch_transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..99d9e51a06b981bae45ce7dd64eaef19a4121991 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/models/layers/patch_transformer.py @@ -0,0 +1,91 @@ +# MIT License + +# Copyright (c) 2022 Intelligent Systems Lab Org + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +# File author: Shariq Farooq Bhat + +import torch +import torch.nn as nn + + +class PatchTransformerEncoder(nn.Module): + def __init__(self, in_channels, patch_size=10, embedding_dim=128, num_heads=4, use_class_token=False): + """ViT-like transformer block + + Args: + in_channels (int): Input channels + patch_size (int, optional): patch size. Defaults to 10. + embedding_dim (int, optional): Embedding dimension in transformer model. Defaults to 128. + num_heads (int, optional): number of attention heads. Defaults to 4. + use_class_token (bool, optional): Whether to use extra token at the start for global accumulation (called as "class token"). Defaults to False. 
+ """ + super(PatchTransformerEncoder, self).__init__() + self.use_class_token = use_class_token + encoder_layers = nn.TransformerEncoderLayer( + embedding_dim, num_heads, dim_feedforward=1024) + self.transformer_encoder = nn.TransformerEncoder( + encoder_layers, num_layers=4) # takes shape S,N,E + + self.embedding_convPxP = nn.Conv2d(in_channels, embedding_dim, + kernel_size=patch_size, stride=patch_size, padding=0) + + def positional_encoding_1d(self, sequence_length, batch_size, embedding_dim, device='cpu'): + """Generate positional encodings + + Args: + sequence_length (int): Sequence length + embedding_dim (int): Embedding dimension + + Returns: + torch.Tensor SBE: Positional encodings + """ + position = torch.arange( + 0, sequence_length, dtype=torch.float32, device=device).unsqueeze(1) + index = torch.arange( + 0, embedding_dim, 2, dtype=torch.float32, device=device).unsqueeze(0) + div_term = torch.exp(index * (-torch.log(torch.tensor(10000.0, device=device)) / embedding_dim)) + pos_encoding = position * div_term + pos_encoding = torch.cat([torch.sin(pos_encoding), torch.cos(pos_encoding)], dim=1) + pos_encoding = pos_encoding.unsqueeze(1).repeat(1, batch_size, 1) + return pos_encoding + + + def forward(self, x): + """Forward pass + + Args: + x (torch.Tensor - NCHW): Input feature tensor + + Returns: + torch.Tensor - SNE: Transformer output embeddings. S - sequence length (=HW/patch_size^2), N - batch size, E - embedding dim + """ + embeddings = self.embedding_convPxP(x).flatten( + 2) # .shape = n,c,s = n, embedding_dim, s + if self.use_class_token: + # extra special token at start ? + embeddings = nn.functional.pad(embeddings, (1, 0)) + + # change to S,N,E format required by transformer + embeddings = embeddings.permute(2, 0, 1) + S, N, E = embeddings.shape + embeddings = embeddings + self.positional_encoding_1d(S, N, E, device=embeddings.device) + x = self.transformer_encoder(embeddings) # .shape = S, N, E + return x diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/models/model_io.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/models/model_io.py new file mode 100644 index 0000000000000000000000000000000000000000..78b6579631dd847ac76651238cb5a948b5a66286 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/models/model_io.py @@ -0,0 +1,92 @@ +# MIT License + +# Copyright (c) 2022 Intelligent Systems Lab Org + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+ +# File author: Shariq Farooq Bhat + +import torch + +def load_state_dict(model, state_dict): + """Load state_dict into model, handling DataParallel and DistributedDataParallel. Also checks for "model" key in state_dict. + + DataParallel prefixes state_dict keys with 'module.' when saving. + If the model is not a DataParallel model but the state_dict is, then prefixes are removed. + If the model is a DataParallel model but the state_dict is not, then prefixes are added. + """ + state_dict = state_dict.get('model', state_dict) + # if model is a DataParallel model, then state_dict keys are prefixed with 'module.' + + do_prefix = isinstance( + model, (torch.nn.DataParallel, torch.nn.parallel.DistributedDataParallel)) + state = {} + for k, v in state_dict.items(): + if k.startswith('module.') and not do_prefix: + k = k[7:] + + if not k.startswith('module.') and do_prefix: + k = 'module.' + k + + state[k] = v + + model.load_state_dict(state) + print("Loaded successfully") + return model + + +def load_wts(model, checkpoint_path): + ckpt = torch.load(checkpoint_path, map_location='cpu') + return load_state_dict(model, ckpt) + + +def load_state_dict_from_url(model, url, **kwargs): + state_dict = torch.hub.load_state_dict_from_url(url, map_location='cpu', **kwargs) + return load_state_dict(model, state_dict) + + +def load_state_from_resource(model, resource: str): + """Loads weights to the model from a given resource. A resource can be of following types: + 1. URL. Prefixed with "url::" + e.g. url::http(s)://url.resource.com/ckpt.pt + + 2. Local path. Prefixed with "local::" + e.g. local::/path/to/ckpt.pt + + + Args: + model (torch.nn.Module): Model + resource (str): resource string + + Returns: + torch.nn.Module: Model with loaded weights + """ + print(f"Using pretrained resource {resource}") + + if resource.startswith('url::'): + url = resource.split('url::')[1] + return load_state_dict_from_url(model, url, progress=True) + + elif resource.startswith('local::'): + path = resource.split('local::')[1] + return load_wts(model, path) + + else: + raise ValueError("Invalid resource type, only url:: and local:: are supported") + \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/models/zoedepth/__init__.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/models/zoedepth/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..cc33f737d238766559f0e3a8def3c0b568f23b7f --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/models/zoedepth/__init__.py @@ -0,0 +1,31 @@ +# MIT License + +# Copyright (c) 2022 Intelligent Systems Lab Org + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +# File author: Shariq Farooq Bhat + +from .zoedepth_v1 import ZoeDepth + +all_versions = { + "v1": ZoeDepth, +} + +get_version = lambda v : all_versions[v] \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/models/zoedepth/config_zoedepth.json b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/models/zoedepth/config_zoedepth.json new file mode 100644 index 0000000000000000000000000000000000000000..99beb2dcd886006ba87805bddbe408b6d5fdff78 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/models/zoedepth/config_zoedepth.json @@ -0,0 +1,58 @@ +{ + "model": { + "name": "ZoeDepth", + "version_name": "v1", + "n_bins": 64, + "bin_embedding_dim": 128, + "bin_centers_type": "softplus", + "n_attractors":[16, 8, 4, 1], + "attractor_alpha": 1000, + "attractor_gamma": 2, + "attractor_kind" : "mean", + "attractor_type" : "inv", + "midas_model_type" : "DPT_BEiT_L_384", + "min_temp": 0.0212, + "max_temp": 50.0, + "output_distribution": "logbinomial", + "memory_efficient": true, + "inverse_midas": false, + "img_size": [384, 512] + }, + + "train": { + "train_midas": true, + "use_pretrained_midas": true, + "trainer": "zoedepth", + "epochs": 5, + "bs": 16, + "optim_kwargs": {"lr": 0.000161, "wd": 0.01}, + "sched_kwargs": {"div_factor": 1, "final_div_factor": 10000, "pct_start": 0.7, "three_phase":false, "cycle_momentum": true}, + "same_lr": false, + "w_si": 1, + "w_domain": 0.2, + "w_reg": 0, + "w_grad": 0, + "avoid_boundary": false, + "random_crop": false, + "input_width": 640, + "input_height": 480, + "midas_lr_factor": 1, + "encoder_lr_factor":10, + "pos_enc_lr_factor":10, + "freeze_midas_bn": true + + }, + + "infer":{ + "train_midas": false, + "use_pretrained_midas": false, + "pretrained_resource" : "url::https://github.com/isl-org/ZoeDepth/releases/download/v1.0/ZoeD_M12_N.pt", + "force_keep_ar": true + }, + + "eval":{ + "train_midas": false, + "use_pretrained_midas": false, + "pretrained_resource" : "url::https://github.com/isl-org/ZoeDepth/releases/download/v1.0/ZoeD_M12_N.pt" + } +} \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/models/zoedepth/config_zoedepth_kitti.json b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/models/zoedepth/config_zoedepth_kitti.json new file mode 100644 index 0000000000000000000000000000000000000000..b51802aa44b91c39e15aacaac4b5ab6bec884414 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/models/zoedepth/config_zoedepth_kitti.json @@ -0,0 +1,22 @@ +{ + "model": { + "bin_centers_type": "normed", + "img_size": [384, 768] + }, + + "train": { + }, + + "infer":{ + "train_midas": false, + "use_pretrained_midas": false, + "pretrained_resource" : "url::https://github.com/isl-org/ZoeDepth/releases/download/v1.0/ZoeD_M12_K.pt", + "force_keep_ar": true + }, + + "eval":{ + "train_midas": false, + "use_pretrained_midas": false, + "pretrained_resource" : "url::https://github.com/isl-org/ZoeDepth/releases/download/v1.0/ZoeD_M12_K.pt" + } +} \ No newline at end of file diff --git 
a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/models/zoedepth/zoedepth_v1.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/models/zoedepth/zoedepth_v1.py new file mode 100644 index 0000000000000000000000000000000000000000..5600cb57dfd9b88f1cccf886fe14e0900856a57f --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/models/zoedepth/zoedepth_v1.py @@ -0,0 +1,250 @@ +# MIT License + +# Copyright (c) 2022 Intelligent Systems Lab Org + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +# File author: Shariq Farooq Bhat + +import itertools + +import torch +import torch.nn as nn +from zoedepth.models.depth_model import DepthModel +from zoedepth.models.base_models.midas import MidasCore +from zoedepth.models.layers.attractor import AttractorLayer, AttractorLayerUnnormed +from zoedepth.models.layers.dist_layers import ConditionalLogBinomial +from zoedepth.models.layers.localbins_layers import (Projector, SeedBinRegressor, + SeedBinRegressorUnnormed) +from zoedepth.models.model_io import load_state_from_resource + + +class ZoeDepth(DepthModel): + def __init__(self, core, n_bins=64, bin_centers_type="softplus", bin_embedding_dim=128, min_depth=1e-3, max_depth=10, + n_attractors=[16, 8, 4, 1], attractor_alpha=300, attractor_gamma=2, attractor_kind='sum', attractor_type='exp', min_temp=5, max_temp=50, train_midas=True, + midas_lr_factor=10, encoder_lr_factor=10, pos_enc_lr_factor=10, inverse_midas=False, **kwargs): + """ZoeDepth model. This is the version of ZoeDepth that has a single metric head + + Args: + core (models.base_models.midas.MidasCore): The base midas model that is used for extraction of "relative" features + n_bins (int, optional): Number of bin centers. Defaults to 64. + bin_centers_type (str, optional): "normed" or "softplus". Activation type used for bin centers. For "normed" bin centers, linear normalization trick is applied. This results in bounded bin centers. + For "softplus", softplus activation is used and thus are unbounded. Defaults to "softplus". + bin_embedding_dim (int, optional): bin embedding dimension. Defaults to 128. + min_depth (float, optional): Lower bound for normed bin centers. Defaults to 1e-3. + max_depth (float, optional): Upper bound for normed bin centers. Defaults to 10. + n_attractors (List[int], optional): Number of bin attractors at decoder layers. Defaults to [16, 8, 4, 1]. + attractor_alpha (int, optional): Proportional attractor strength. 
Refer to models.layers.attractor for more details. Defaults to 300. + attractor_gamma (int, optional): Exponential attractor strength. Refer to models.layers.attractor for more details. Defaults to 2. + attractor_kind (str, optional): Attraction aggregation "sum" or "mean". Defaults to 'sum'. + attractor_type (str, optional): Type of attractor to use; "inv" (Inverse attractor) or "exp" (Exponential attractor). Defaults to 'exp'. + min_temp (int, optional): Lower bound for temperature of output probability distribution. Defaults to 5. + max_temp (int, optional): Upper bound for temperature of output probability distribution. Defaults to 50. + train_midas (bool, optional): Whether to train "core", the base midas model. Defaults to True. + midas_lr_factor (int, optional): Learning rate reduction factor for base midas model except its encoder and positional encodings. Defaults to 10. + encoder_lr_factor (int, optional): Learning rate reduction factor for the encoder in midas model. Defaults to 10. + pos_enc_lr_factor (int, optional): Learning rate reduction factor for positional encodings in the base midas model. Defaults to 10. + """ + super().__init__() + + self.core = core + self.max_depth = max_depth + self.min_depth = min_depth + self.min_temp = min_temp + self.bin_centers_type = bin_centers_type + + self.midas_lr_factor = midas_lr_factor + self.encoder_lr_factor = encoder_lr_factor + self.pos_enc_lr_factor = pos_enc_lr_factor + self.train_midas = train_midas + self.inverse_midas = inverse_midas + + if self.encoder_lr_factor <= 0: + self.core.freeze_encoder( + freeze_rel_pos=self.pos_enc_lr_factor <= 0) + + N_MIDAS_OUT = 32 + btlnck_features = self.core.output_channels[0] + num_out_features = self.core.output_channels[1:] + + self.conv2 = nn.Conv2d(btlnck_features, btlnck_features, + kernel_size=1, stride=1, padding=0) # btlnck conv + + if bin_centers_type == "normed": + SeedBinRegressorLayer = SeedBinRegressor + Attractor = AttractorLayer + elif bin_centers_type == "softplus": + SeedBinRegressorLayer = SeedBinRegressorUnnormed + Attractor = AttractorLayerUnnormed + elif bin_centers_type == "hybrid1": + SeedBinRegressorLayer = SeedBinRegressor + Attractor = AttractorLayerUnnormed + elif bin_centers_type == "hybrid2": + SeedBinRegressorLayer = SeedBinRegressorUnnormed + Attractor = AttractorLayer + else: + raise ValueError( + "bin_centers_type should be one of 'normed', 'softplus', 'hybrid1', 'hybrid2'") + + self.seed_bin_regressor = SeedBinRegressorLayer( + btlnck_features, n_bins=n_bins, min_depth=min_depth, max_depth=max_depth) + self.seed_projector = Projector(btlnck_features, bin_embedding_dim) + self.projectors = nn.ModuleList([ + Projector(num_out, bin_embedding_dim) + for num_out in num_out_features + ]) + self.attractors = nn.ModuleList([ + Attractor(bin_embedding_dim, n_bins, n_attractors=n_attractors[i], min_depth=min_depth, max_depth=max_depth, + alpha=attractor_alpha, gamma=attractor_gamma, kind=attractor_kind, attractor_type=attractor_type) + for i in range(len(num_out_features)) + ]) + + last_in = N_MIDAS_OUT + 1 # +1 for relative depth + + # use log binomial instead of softmax + self.conditional_log_binomial = ConditionalLogBinomial( + last_in, bin_embedding_dim, n_classes=n_bins, min_temp=min_temp, max_temp=max_temp) + + def forward(self, x, return_final_centers=False, denorm=False, return_probs=False, **kwargs): + """ + Args: + x (torch.Tensor): Input image tensor of shape (B, C, H, W) + return_final_centers (bool, optional): Whether to return the final bin centers. 
Defaults to False. + denorm (bool, optional): Whether to denormalize the input image. This reverses ImageNet normalization as midas normalization is different. Defaults to False. + return_probs (bool, optional): Whether to return the output probability distribution. Defaults to False. + + Returns: + dict: Dictionary containing the following keys: + - rel_depth (torch.Tensor): Relative depth map of shape (B, H, W) + - metric_depth (torch.Tensor): Metric depth map of shape (B, 1, H, W) + - bin_centers (torch.Tensor): Bin centers of shape (B, n_bins). Present only if return_final_centers is True + - probs (torch.Tensor): Output probability distribution of shape (B, n_bins, H, W). Present only if return_probs is True + + """ + b, c, h, w = x.shape + # print("input shape ", x.shape) + self.orig_input_width = w + self.orig_input_height = h + rel_depth, out = self.core(x, denorm=denorm, return_rel_depth=True) + # print("output shapes", rel_depth.shape, out.shape) + + outconv_activation = out[0] + btlnck = out[1] + x_blocks = out[2:] + + x_d0 = self.conv2(btlnck) + x = x_d0 + _, seed_b_centers = self.seed_bin_regressor(x) + + if self.bin_centers_type == 'normed' or self.bin_centers_type == 'hybrid2': + b_prev = (seed_b_centers - self.min_depth) / \ + (self.max_depth - self.min_depth) + else: + b_prev = seed_b_centers + + prev_b_embedding = self.seed_projector(x) + + # unroll this loop for better performance + for projector, attractor, x in zip(self.projectors, self.attractors, x_blocks): + b_embedding = projector(x) + b, b_centers = attractor( + b_embedding, b_prev, prev_b_embedding, interpolate=True) + b_prev = b.clone() + prev_b_embedding = b_embedding.clone() + + last = outconv_activation + + if self.inverse_midas: + # invert depth followed by normalization + rel_depth = 1.0 / (rel_depth + 1e-6) + rel_depth = (rel_depth - rel_depth.min()) / \ + (rel_depth.max() - rel_depth.min()) + # concat rel depth with last. First interpolate rel depth to last size + rel_cond = rel_depth.unsqueeze(1) + rel_cond = nn.functional.interpolate( + rel_cond, size=last.shape[2:], mode='bilinear', align_corners=True) + last = torch.cat([last, rel_cond], dim=1) + + b_embedding = nn.functional.interpolate( + b_embedding, last.shape[-2:], mode='bilinear', align_corners=True) + x = self.conditional_log_binomial(last, b_embedding) + + # Now depth value is Sum px * cx , where cx are bin_centers from the last bin tensor + # print(x.shape, b_centers.shape) + b_centers = nn.functional.interpolate( + b_centers, x.shape[-2:], mode='bilinear', align_corners=True) + out = torch.sum(x * b_centers, dim=1, keepdim=True) + + # Structure output dict + output = dict(metric_depth=out) + if return_final_centers or return_probs: + output['bin_centers'] = b_centers + + if return_probs: + output['probs'] = x + + return output + + def get_lr_params(self, lr): + """ + Learning rate configuration for different layers of the model + Args: + lr (float) : Base learning rate + Returns: + list : list of parameters to optimize and their learning rates, in the format required by torch optimizers. 
+ """ + param_conf = [] + if self.train_midas: + if self.encoder_lr_factor > 0: + param_conf.append({'params': self.core.get_enc_params_except_rel_pos( + ), 'lr': lr / self.encoder_lr_factor}) + + if self.pos_enc_lr_factor > 0: + param_conf.append( + {'params': self.core.get_rel_pos_params(), 'lr': lr / self.pos_enc_lr_factor}) + + midas_params = self.core.core.scratch.parameters() + midas_lr_factor = self.midas_lr_factor + param_conf.append( + {'params': midas_params, 'lr': lr / midas_lr_factor}) + + remaining_modules = [] + for name, child in self.named_children(): + if name != 'core': + remaining_modules.append(child) + remaining_params = itertools.chain( + *[child.parameters() for child in remaining_modules]) + + param_conf.append({'params': remaining_params, 'lr': lr}) + + return param_conf + + @staticmethod + def build(midas_model_type="DPT_BEiT_L_384", pretrained_resource=None, use_pretrained_midas=False, train_midas=False, freeze_midas_bn=True, **kwargs): + core = MidasCore.build(midas_model_type=midas_model_type, use_pretrained_midas=use_pretrained_midas, + train_midas=train_midas, fetch_features=True, freeze_bn=freeze_midas_bn, **kwargs) + model = ZoeDepth(core, **kwargs) + if pretrained_resource: + assert isinstance(pretrained_resource, str), "pretrained_resource must be a string" + model = load_state_from_resource(model, pretrained_resource) + return model + + @staticmethod + def build_from_config(config): + return ZoeDepth.build(**config) diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/models/zoedepth_nk/__init__.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/models/zoedepth_nk/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..513a278b939c10c010e3c0250ec73544d5663886 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/models/zoedepth_nk/__init__.py @@ -0,0 +1,31 @@ +# MIT License + +# Copyright (c) 2022 Intelligent Systems Lab Org + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+ +# File author: Shariq Farooq Bhat + +from .zoedepth_nk_v1 import ZoeDepthNK + +all_versions = { + "v1": ZoeDepthNK, +} + +get_version = lambda v : all_versions[v] \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/models/zoedepth_nk/config_zoedepth_nk.json b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/models/zoedepth_nk/config_zoedepth_nk.json new file mode 100644 index 0000000000000000000000000000000000000000..42bab2a3ad159a09599a5aba270c491021a3cf1a --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/models/zoedepth_nk/config_zoedepth_nk.json @@ -0,0 +1,67 @@ +{ + "model": { + "name": "ZoeDepthNK", + "version_name": "v1", + "bin_conf" : [ + { + "name": "nyu", + "n_bins": 64, + "min_depth": 1e-3, + "max_depth": 10.0 + }, + { + "name": "kitti", + "n_bins": 64, + "min_depth": 1e-3, + "max_depth": 80.0 + } + ], + "bin_embedding_dim": 128, + "bin_centers_type": "softplus", + "n_attractors":[16, 8, 4, 1], + "attractor_alpha": 1000, + "attractor_gamma": 2, + "attractor_kind" : "mean", + "attractor_type" : "inv", + "min_temp": 0.0212, + "max_temp": 50.0, + "memory_efficient": true, + "midas_model_type" : "DPT_BEiT_L_384", + "img_size": [384, 512] + }, + + "train": { + "train_midas": true, + "use_pretrained_midas": true, + "trainer": "zoedepth_nk", + "epochs": 5, + "bs": 16, + "optim_kwargs": {"lr": 0.0002512, "wd": 0.01}, + "sched_kwargs": {"div_factor": 1, "final_div_factor": 10000, "pct_start": 0.7, "three_phase":false, "cycle_momentum": true}, + "same_lr": false, + "w_si": 1, + "w_domain": 100, + "avoid_boundary": false, + "random_crop": false, + "input_width": 640, + "input_height": 480, + "w_grad": 0, + "w_reg": 0, + "midas_lr_factor": 10, + "encoder_lr_factor":10, + "pos_enc_lr_factor":10 + }, + + "infer": { + "train_midas": false, + "pretrained_resource": "url::https://github.com/isl-org/ZoeDepth/releases/download/v1.0/ZoeD_M12_NK.pt", + "use_pretrained_midas": false, + "force_keep_ar": true + }, + + "eval": { + "train_midas": false, + "pretrained_resource": "url::https://github.com/isl-org/ZoeDepth/releases/download/v1.0/ZoeD_M12_NK.pt", + "use_pretrained_midas": false + } +} \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/models/zoedepth_nk/zoedepth_nk_v1.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/models/zoedepth_nk/zoedepth_nk_v1.py new file mode 100644 index 0000000000000000000000000000000000000000..7368ae8031188a9f946d9d3f29633c96e791e68e --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/models/zoedepth_nk/zoedepth_nk_v1.py @@ -0,0 +1,333 @@ +# MIT License + +# Copyright (c) 2022 Intelligent Systems Lab Org + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. 
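# Illustrative sketch of how the per-domain "bin_conf" entries in config_zoedepth_nk.json
# (shown above) are consumed: ZoeDepthNK (defined below) builds one seed-bin regressor, one
# attractor stack and one conditional log-binomial head per entry, keyed by its "name".
# The relative path to the JSON file is an assumption for illustration only.
import json

with open("config_zoedepth_nk.json") as f:
    bin_confs = json.load(f)["model"]["bin_conf"]

for conf in bin_confs:
    # nyu: 64 bins spanning [0.001, 10.0] m; kitti: 64 bins spanning [0.001, 80.0] m
    print(conf["name"], conf["n_bins"], conf["min_depth"], conf["max_depth"])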
+ +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +# File author: Shariq Farooq Bhat + +import itertools + +import torch +import torch.nn as nn + +from zoedepth.models.depth_model import DepthModel +from zoedepth.models.base_models.midas import MidasCore +from zoedepth.models.layers.attractor import AttractorLayer, AttractorLayerUnnormed +from zoedepth.models.layers.dist_layers import ConditionalLogBinomial +from zoedepth.models.layers.localbins_layers import (Projector, SeedBinRegressor, + SeedBinRegressorUnnormed) +from zoedepth.models.layers.patch_transformer import PatchTransformerEncoder +from zoedepth.models.model_io import load_state_from_resource + + +class ZoeDepthNK(DepthModel): + def __init__(self, core, bin_conf, bin_centers_type="softplus", bin_embedding_dim=128, + n_attractors=[16, 8, 4, 1], attractor_alpha=300, attractor_gamma=2, attractor_kind='sum', attractor_type='exp', + min_temp=5, max_temp=50, + memory_efficient=False, train_midas=True, + is_midas_pretrained=True, midas_lr_factor=1, encoder_lr_factor=10, pos_enc_lr_factor=10, inverse_midas=False, **kwargs): + """ZoeDepthNK model. This is the version of ZoeDepth that has two metric heads and uses a learned router to route to experts. + + Args: + core (models.base_models.midas.MidasCore): The base midas model that is used for extraction of "relative" features + + bin_conf (List[dict]): A list of dictionaries that contain the bin configuration for each metric head. Each dictionary should contain the following keys: + "name" (str, typically same as the dataset name), "n_bins" (int), "min_depth" (float), "max_depth" (float) + + The length of this list determines the number of metric heads. + bin_centers_type (str, optional): "normed" or "softplus". Activation type used for bin centers. For "normed" bin centers, linear normalization trick is applied. This results in bounded bin centers. + For "softplus", softplus activation is used and thus are unbounded. Defaults to "normed". + bin_embedding_dim (int, optional): bin embedding dimension. Defaults to 128. + + n_attractors (List[int], optional): Number of bin attractors at decoder layers. Defaults to [16, 8, 4, 1]. + attractor_alpha (int, optional): Proportional attractor strength. Refer to models.layers.attractor for more details. Defaults to 300. + attractor_gamma (int, optional): Exponential attractor strength. Refer to models.layers.attractor for more details. Defaults to 2. + attractor_kind (str, optional): Attraction aggregation "sum" or "mean". Defaults to 'sum'. + attractor_type (str, optional): Type of attractor to use; "inv" (Inverse attractor) or "exp" (Exponential attractor). Defaults to 'exp'. + + min_temp (int, optional): Lower bound for temperature of output probability distribution. Defaults to 5. + max_temp (int, optional): Upper bound for temperature of output probability distribution. Defaults to 50. + + memory_efficient (bool, optional): Whether to use memory efficient version of attractor layers. Memory efficient version is slower but is recommended incase of multiple metric heads in order save GPU memory. Defaults to False. 
+ + train_midas (bool, optional): Whether to train "core", the base midas model. Defaults to True. + is_midas_pretrained (bool, optional): Is "core" pretrained? Defaults to True. + midas_lr_factor (int, optional): Learning rate reduction factor for base midas model except its encoder and positional encodings. Defaults to 10. + encoder_lr_factor (int, optional): Learning rate reduction factor for the encoder in midas model. Defaults to 10. + pos_enc_lr_factor (int, optional): Learning rate reduction factor for positional encodings in the base midas model. Defaults to 10. + + """ + + super().__init__() + + self.core = core + self.bin_conf = bin_conf + self.min_temp = min_temp + self.max_temp = max_temp + self.memory_efficient = memory_efficient + self.train_midas = train_midas + self.is_midas_pretrained = is_midas_pretrained + self.midas_lr_factor = midas_lr_factor + self.encoder_lr_factor = encoder_lr_factor + self.pos_enc_lr_factor = pos_enc_lr_factor + self.inverse_midas = inverse_midas + + N_MIDAS_OUT = 32 + btlnck_features = self.core.output_channels[0] + num_out_features = self.core.output_channels[1:] + # self.scales = [16, 8, 4, 2] # spatial scale factors + + self.conv2 = nn.Conv2d( + btlnck_features, btlnck_features, kernel_size=1, stride=1, padding=0) + + # Transformer classifier on the bottleneck + self.patch_transformer = PatchTransformerEncoder( + btlnck_features, 1, 128, use_class_token=True) + self.mlp_classifier = nn.Sequential( + nn.Linear(128, 128), + nn.ReLU(), + nn.Linear(128, 2) + ) + + if bin_centers_type == "normed": + SeedBinRegressorLayer = SeedBinRegressor + Attractor = AttractorLayer + elif bin_centers_type == "softplus": + SeedBinRegressorLayer = SeedBinRegressorUnnormed + Attractor = AttractorLayerUnnormed + elif bin_centers_type == "hybrid1": + SeedBinRegressorLayer = SeedBinRegressor + Attractor = AttractorLayerUnnormed + elif bin_centers_type == "hybrid2": + SeedBinRegressorLayer = SeedBinRegressorUnnormed + Attractor = AttractorLayer + else: + raise ValueError( + "bin_centers_type should be one of 'normed', 'softplus', 'hybrid1', 'hybrid2'") + self.bin_centers_type = bin_centers_type + # We have bins for each bin conf. 
+ # Create a map (ModuleDict) of 'name' -> seed_bin_regressor + self.seed_bin_regressors = nn.ModuleDict( + {conf['name']: SeedBinRegressorLayer(btlnck_features, conf["n_bins"], mlp_dim=bin_embedding_dim//2, min_depth=conf["min_depth"], max_depth=conf["max_depth"]) + for conf in bin_conf} + ) + + self.seed_projector = Projector( + btlnck_features, bin_embedding_dim, mlp_dim=bin_embedding_dim//2) + self.projectors = nn.ModuleList([ + Projector(num_out, bin_embedding_dim, mlp_dim=bin_embedding_dim//2) + for num_out in num_out_features + ]) + + # Create a map (ModuleDict) of 'name' -> attractors (ModuleList) + self.attractors = nn.ModuleDict( + {conf['name']: nn.ModuleList([ + Attractor(bin_embedding_dim, n_attractors[i], + mlp_dim=bin_embedding_dim, alpha=attractor_alpha, + gamma=attractor_gamma, kind=attractor_kind, + attractor_type=attractor_type, memory_efficient=memory_efficient, + min_depth=conf["min_depth"], max_depth=conf["max_depth"]) + for i in range(len(n_attractors)) + ]) + for conf in bin_conf} + ) + + last_in = N_MIDAS_OUT + # conditional log binomial for each bin conf + self.conditional_log_binomial = nn.ModuleDict( + {conf['name']: ConditionalLogBinomial(last_in, bin_embedding_dim, conf['n_bins'], bottleneck_factor=4, min_temp=self.min_temp, max_temp=self.max_temp) + for conf in bin_conf} + ) + + def forward(self, x, return_final_centers=False, denorm=False, return_probs=False, **kwargs): + """ + Args: + x (torch.Tensor): Input image tensor of shape (B, C, H, W). Assumes all images are from the same domain. + return_final_centers (bool, optional): Whether to return the final centers of the attractors. Defaults to False. + denorm (bool, optional): Whether to denormalize the input image. Defaults to False. + return_probs (bool, optional): Whether to return the probabilities of the bins. Defaults to False. + + Returns: + dict: Dictionary of outputs with keys: + - "rel_depth": Relative depth map of shape (B, 1, H, W) + - "metric_depth": Metric depth map of shape (B, 1, H, W) + - "domain_logits": Domain logits of shape (B, 2) + - "bin_centers": Bin centers of shape (B, N, H, W). Present only if return_final_centers is True + - "probs": Bin probabilities of shape (B, N, H, W). 
Present only if return_probs is True + """ + b, c, h, w = x.shape + self.orig_input_width = w + self.orig_input_height = h + rel_depth, out = self.core(x, denorm=denorm, return_rel_depth=True) + + outconv_activation = out[0] + btlnck = out[1] + x_blocks = out[2:] + + x_d0 = self.conv2(btlnck) + x = x_d0 + + # Predict which path to take + embedding = self.patch_transformer(x)[0] # N, E + domain_logits = self.mlp_classifier(embedding) # N, 2 + domain_vote = torch.softmax(domain_logits.sum( + dim=0, keepdim=True), dim=-1) # 1, 2 + + # Get the path + bin_conf_name = ["nyu", "kitti"][torch.argmax( + domain_vote, dim=-1).squeeze().item()] + + try: + conf = [c for c in self.bin_conf if c.name == bin_conf_name][0] + except IndexError: + raise ValueError( + f"bin_conf_name {bin_conf_name} not found in bin_confs") + + min_depth = conf['min_depth'] + max_depth = conf['max_depth'] + + seed_bin_regressor = self.seed_bin_regressors[bin_conf_name] + _, seed_b_centers = seed_bin_regressor(x) + if self.bin_centers_type == 'normed' or self.bin_centers_type == 'hybrid2': + b_prev = (seed_b_centers - min_depth)/(max_depth - min_depth) + else: + b_prev = seed_b_centers + prev_b_embedding = self.seed_projector(x) + + attractors = self.attractors[bin_conf_name] + for projector, attractor, x in zip(self.projectors, attractors, x_blocks): + b_embedding = projector(x) + b, b_centers = attractor( + b_embedding, b_prev, prev_b_embedding, interpolate=True) + b_prev = b + prev_b_embedding = b_embedding + + last = outconv_activation + + b_centers = nn.functional.interpolate( + b_centers, last.shape[-2:], mode='bilinear', align_corners=True) + b_embedding = nn.functional.interpolate( + b_embedding, last.shape[-2:], mode='bilinear', align_corners=True) + + clb = self.conditional_log_binomial[bin_conf_name] + x = clb(last, b_embedding) + + # Now depth value is Sum px * cx , where cx are bin_centers from the last bin tensor + # print(x.shape, b_centers.shape) + # b_centers = nn.functional.interpolate(b_centers, x.shape[-2:], mode='bilinear', align_corners=True) + out = torch.sum(x * b_centers, dim=1, keepdim=True) + + output = dict(domain_logits=domain_logits, metric_depth=out) + if return_final_centers or return_probs: + output['bin_centers'] = b_centers + + if return_probs: + output['probs'] = x + return output + + def get_lr_params(self, lr): + """ + Learning rate configuration for different layers of the model + + Args: + lr (float) : Base learning rate + Returns: + list : list of parameters to optimize and their learning rates, in the format required by torch optimizers. 
+ """ + param_conf = [] + if self.train_midas: + def get_rel_pos_params(): + for name, p in self.core.core.pretrained.named_parameters(): + if "relative_position" in name: + yield p + + def get_enc_params_except_rel_pos(): + for name, p in self.core.core.pretrained.named_parameters(): + if "relative_position" not in name: + yield p + + encoder_params = get_enc_params_except_rel_pos() + rel_pos_params = get_rel_pos_params() + midas_params = self.core.core.scratch.parameters() + midas_lr_factor = self.midas_lr_factor if self.is_midas_pretrained else 1.0 + param_conf.extend([ + {'params': encoder_params, 'lr': lr / self.encoder_lr_factor}, + {'params': rel_pos_params, 'lr': lr / self.pos_enc_lr_factor}, + {'params': midas_params, 'lr': lr / midas_lr_factor} + ]) + + remaining_modules = [] + for name, child in self.named_children(): + if name != 'core': + remaining_modules.append(child) + remaining_params = itertools.chain( + *[child.parameters() for child in remaining_modules]) + param_conf.append({'params': remaining_params, 'lr': lr}) + return param_conf + + def get_conf_parameters(self, conf_name): + """ + Returns parameters of all the ModuleDicts children that are exclusively used for the given bin configuration + """ + params = [] + for name, child in self.named_children(): + if isinstance(child, nn.ModuleDict): + for bin_conf_name, module in child.items(): + if bin_conf_name == conf_name: + params += list(module.parameters()) + return params + + def freeze_conf(self, conf_name): + """ + Freezes all the parameters of all the ModuleDicts children that are exclusively used for the given bin configuration + """ + for p in self.get_conf_parameters(conf_name): + p.requires_grad = False + + def unfreeze_conf(self, conf_name): + """ + Unfreezes all the parameters of all the ModuleDicts children that are exclusively used for the given bin configuration + """ + for p in self.get_conf_parameters(conf_name): + p.requires_grad = True + + def freeze_all_confs(self): + """ + Freezes all the parameters of all the ModuleDicts children + """ + for name, child in self.named_children(): + if isinstance(child, nn.ModuleDict): + for bin_conf_name, module in child.items(): + for p in module.parameters(): + p.requires_grad = False + + @staticmethod + def build(midas_model_type="DPT_BEiT_L_384", pretrained_resource=None, use_pretrained_midas=False, train_midas=False, freeze_midas_bn=True, **kwargs): + core = MidasCore.build(midas_model_type=midas_model_type, use_pretrained_midas=use_pretrained_midas, + train_midas=train_midas, fetch_features=True, freeze_bn=freeze_midas_bn, **kwargs) + model = ZoeDepthNK(core, **kwargs) + if pretrained_resource: + assert isinstance(pretrained_resource, str), "pretrained_resource must be a string" + model = load_state_from_resource(model, pretrained_resource) + return model + + @staticmethod + def build_from_config(config): + return ZoeDepthNK.build(**config) diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/utils/__init__.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5f2668792389157609abb2a0846fb620e7d67eb9 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/utils/__init__.py @@ -0,0 +1,24 @@ +# MIT License + +# Copyright (c) 2022 Intelligent Systems Lab Org + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated 
documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +# File author: Shariq Farooq Bhat + diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/utils/arg_utils.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/utils/arg_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..8a3004ec3679c0a40fd8961253733fb4343ad545 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/utils/arg_utils.py @@ -0,0 +1,33 @@ + + +def infer_type(x): # hacky way to infer type from string args + if not isinstance(x, str): + return x + + try: + x = int(x) + return x + except ValueError: + pass + + try: + x = float(x) + return x + except ValueError: + pass + + return x + + +def parse_unknown(unknown_args): + clean = [] + for a in unknown_args: + if "=" in a: + k, v = a.split("=") + clean.extend([k, v]) + else: + clean.append(a) + + keys = clean[::2] + values = clean[1::2] + return {k.replace("--", ""): infer_type(v) for k, v in zip(keys, values)} diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/utils/config.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/utils/config.py new file mode 100644 index 0000000000000000000000000000000000000000..363b0e186cd1247e8f5fc224e6f69f5fc8190c99 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/utils/config.py @@ -0,0 +1,437 @@ +# MIT License + +# Copyright (c) 2022 Intelligent Systems Lab Org + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
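# A quick usage sketch for the arg_utils helpers above: parse_unknown() turns leftover
# command-line override tokens (e.g. the "unknown" list returned by argparse's
# parse_known_args) into a typed dict, with infer_type() casting numeric strings.
# The sample tokens are made up for illustration.
from zoedepth.utils.arg_utils import parse_unknown

overrides = parse_unknown(["--n_bins=64", "--lr", "3e-4", "--same_lr", "1"])
print(overrides)   # {'n_bins': 64, 'lr': 0.0003, 'same_lr': 1}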
+ +# File author: Shariq Farooq Bhat + +import json +import os + +from zoedepth.utils.easydict import EasyDict as edict + +from zoedepth.utils.arg_utils import infer_type +import pathlib +import platform + +ROOT = pathlib.Path(__file__).parent.parent.resolve() + +HOME_DIR = os.path.expanduser("~") + +COMMON_CONFIG = { + "save_dir": os.path.expanduser("~/shortcuts/monodepth3_checkpoints"), + "project": "ZoeDepth", + "tags": '', + "notes": "", + "gpu": None, + "root": ".", + "uid": None, + "print_losses": False +} + +DATASETS_CONFIG = { + "kitti": { + "dataset": "kitti", + "min_depth": 0.001, + "max_depth": 80, + "data_path": os.path.join(HOME_DIR, "shortcuts/datasets/kitti/raw"), + "gt_path": os.path.join(HOME_DIR, "shortcuts/datasets/kitti/gts"), + "filenames_file": "./train_test_inputs/kitti_eigen_train_files_with_gt.txt", + "input_height": 352, + "input_width": 1216, # 704 + "data_path_eval": os.path.join(HOME_DIR, "shortcuts/datasets/kitti/raw"), + "gt_path_eval": os.path.join(HOME_DIR, "shortcuts/datasets/kitti/gts"), + "filenames_file_eval": "./train_test_inputs/kitti_eigen_test_files_with_gt.txt", + + "min_depth_eval": 1e-3, + "max_depth_eval": 80, + + "do_random_rotate": True, + "degree": 1.0, + "do_kb_crop": True, + "garg_crop": True, + "eigen_crop": False, + "use_right": False + }, + "kitti_test": { + "dataset": "kitti", + "min_depth": 0.001, + "max_depth": 80, + "data_path": os.path.join(HOME_DIR, "shortcuts/datasets/kitti/raw"), + "gt_path": os.path.join(HOME_DIR, "shortcuts/datasets/kitti/gts"), + "filenames_file": "./train_test_inputs/kitti_eigen_train_files_with_gt.txt", + "input_height": 352, + "input_width": 1216, + "data_path_eval": os.path.join(HOME_DIR, "shortcuts/datasets/kitti/raw"), + "gt_path_eval": os.path.join(HOME_DIR, "shortcuts/datasets/kitti/gts"), + "filenames_file_eval": "./train_test_inputs/kitti_eigen_test_files_with_gt.txt", + + "min_depth_eval": 1e-3, + "max_depth_eval": 80, + + "do_random_rotate": False, + "degree": 1.0, + "do_kb_crop": True, + "garg_crop": True, + "eigen_crop": False, + "use_right": False + }, + "nyu": { + "dataset": "nyu", + "avoid_boundary": False, + "min_depth": 1e-3, # originally 0.1 + "max_depth": 10, + "data_path": os.path.join(HOME_DIR, "shortcuts/datasets/nyu_depth_v2/sync/"), + "gt_path": os.path.join(HOME_DIR, "shortcuts/datasets/nyu_depth_v2/sync/"), + "filenames_file": "./train_test_inputs/nyudepthv2_train_files_with_gt.txt", + "input_height": 480, + "input_width": 640, + "data_path_eval": os.path.join(HOME_DIR, "shortcuts/datasets/nyu_depth_v2/official_splits/test/"), + "gt_path_eval": os.path.join(HOME_DIR, "shortcuts/datasets/nyu_depth_v2/official_splits/test/"), + "filenames_file_eval": "./train_test_inputs/nyudepthv2_test_files_with_gt.txt", + "min_depth_eval": 1e-3, + "max_depth_eval": 10, + "min_depth_diff": -10, + "max_depth_diff": 10, + + "do_random_rotate": True, + "degree": 1.0, + "do_kb_crop": False, + "garg_crop": False, + "eigen_crop": True + }, + "ibims": { + "dataset": "ibims", + "ibims_root": os.path.join(HOME_DIR, "shortcuts/datasets/ibims/ibims1_core_raw/"), + "eigen_crop": True, + "garg_crop": False, + "do_kb_crop": False, + "min_depth_eval": 0, + "max_depth_eval": 10, + "min_depth": 1e-3, + "max_depth": 10 + }, + "sunrgbd": { + "dataset": "sunrgbd", + "sunrgbd_root": os.path.join(HOME_DIR, "shortcuts/datasets/SUNRGBD/test/"), + "eigen_crop": True, + "garg_crop": False, + "do_kb_crop": False, + "min_depth_eval": 0, + "max_depth_eval": 8, + "min_depth": 1e-3, + "max_depth": 10 + }, + "diml_indoor": { + 
"dataset": "diml_indoor", + "diml_indoor_root": os.path.join(HOME_DIR, "shortcuts/datasets/diml_indoor_test/"), + "eigen_crop": True, + "garg_crop": False, + "do_kb_crop": False, + "min_depth_eval": 0, + "max_depth_eval": 10, + "min_depth": 1e-3, + "max_depth": 10 + }, + "diml_outdoor": { + "dataset": "diml_outdoor", + "diml_outdoor_root": os.path.join(HOME_DIR, "shortcuts/datasets/diml_outdoor_test/"), + "eigen_crop": False, + "garg_crop": True, + "do_kb_crop": False, + "min_depth_eval": 2, + "max_depth_eval": 80, + "min_depth": 1e-3, + "max_depth": 80 + }, + "diode_indoor": { + "dataset": "diode_indoor", + "diode_indoor_root": os.path.join(HOME_DIR, "shortcuts/datasets/diode_indoor/"), + "eigen_crop": True, + "garg_crop": False, + "do_kb_crop": False, + "min_depth_eval": 1e-3, + "max_depth_eval": 10, + "min_depth": 1e-3, + "max_depth": 10 + }, + "diode_outdoor": { + "dataset": "diode_outdoor", + "diode_outdoor_root": os.path.join(HOME_DIR, "shortcuts/datasets/diode_outdoor/"), + "eigen_crop": False, + "garg_crop": True, + "do_kb_crop": False, + "min_depth_eval": 1e-3, + "max_depth_eval": 80, + "min_depth": 1e-3, + "max_depth": 80 + }, + "hypersim_test": { + "dataset": "hypersim_test", + "hypersim_test_root": os.path.join(HOME_DIR, "shortcuts/datasets/hypersim_test/"), + "eigen_crop": True, + "garg_crop": False, + "do_kb_crop": False, + "min_depth_eval": 1e-3, + "max_depth_eval": 80, + "min_depth": 1e-3, + "max_depth": 10 + }, + "vkitti": { + "dataset": "vkitti", + "vkitti_root": os.path.join(HOME_DIR, "shortcuts/datasets/vkitti_test/"), + "eigen_crop": False, + "garg_crop": True, + "do_kb_crop": True, + "min_depth_eval": 1e-3, + "max_depth_eval": 80, + "min_depth": 1e-3, + "max_depth": 80 + }, + "vkitti2": { + "dataset": "vkitti2", + "vkitti2_root": os.path.join(HOME_DIR, "shortcuts/datasets/vkitti2/"), + "eigen_crop": False, + "garg_crop": True, + "do_kb_crop": True, + "min_depth_eval": 1e-3, + "max_depth_eval": 80, + "min_depth": 1e-3, + "max_depth": 80, + }, + "ddad": { + "dataset": "ddad", + "ddad_root": os.path.join(HOME_DIR, "shortcuts/datasets/ddad/ddad_val/"), + "eigen_crop": False, + "garg_crop": True, + "do_kb_crop": True, + "min_depth_eval": 1e-3, + "max_depth_eval": 80, + "min_depth": 1e-3, + "max_depth": 80, + }, +} + +ALL_INDOOR = ["nyu", "ibims", "sunrgbd", "diode_indoor", "hypersim_test"] +ALL_OUTDOOR = ["kitti", "diml_outdoor", "diode_outdoor", "vkitti2", "ddad"] +ALL_EVAL_DATASETS = ALL_INDOOR + ALL_OUTDOOR + +COMMON_TRAINING_CONFIG = { + "dataset": "nyu", + "distributed": True, + "workers": 16, + "clip_grad": 0.1, + "use_shared_dict": False, + "shared_dict": None, + "use_amp": False, + + "aug": True, + "random_crop": False, + "random_translate": False, + "translate_prob": 0.2, + "max_translation": 100, + + "validate_every": 0.25, + "log_images_every": 0.1, + "prefetch": False, +} + + +def flatten(config, except_keys=('bin_conf')): + def recurse(inp): + if isinstance(inp, dict): + for key, value in inp.items(): + if key in except_keys: + yield (key, value) + if isinstance(value, dict): + yield from recurse(value) + else: + yield (key, value) + + return dict(list(recurse(config))) + + +def split_combined_args(kwargs): + """Splits the arguments that are combined with '__' into multiple arguments. + Combined arguments should have equal number of keys and values. + Keys are separated by '__' and Values are separated with ';'. 
+ For example, '__n_bins__lr=256;0.001' + + Args: + kwargs (dict): key-value pairs of arguments where key-value is optionally combined according to the above format. + + Returns: + dict: Parsed dict with the combined arguments split into individual key-value pairs. + """ + new_kwargs = dict(kwargs) + for key, value in kwargs.items(): + if key.startswith("__"): + keys = key.split("__")[1:] + values = value.split(";") + assert len(keys) == len( + values), f"Combined arguments should have equal number of keys and values. Keys are separated by '__' and Values are separated with ';'. For example, '__n_bins__lr=256;0.001. Given (keys,values) is ({keys}, {values})" + for k, v in zip(keys, values): + new_kwargs[k] = v + return new_kwargs + + +def parse_list(config, key, dtype=int): + """Parse a list of values for the key if the value is a string. The values are separated by a comma. + Modifies the config in place. + """ + if key in config: + if isinstance(config[key], str): + config[key] = list(map(dtype, config[key].split(','))) + assert isinstance(config[key], list) and all([isinstance(e, dtype) for e in config[key]] + ), f"{key} should be a list of values dtype {dtype}. Given {config[key]} of type {type(config[key])} with values of type {[type(e) for e in config[key]]}." + + +def get_model_config(model_name, model_version=None): + """Find and parse the .json config file for the model. + + Args: + model_name (str): name of the model. The config file should be named config_{model_name}[_{model_version}].json under the models/{model_name} directory. + model_version (str, optional): Specific config version. If specified config_{model_name}_{model_version}.json is searched for and used. Otherwise config_{model_name}.json is used. Defaults to None. + + Returns: + easydict: the config dictionary for the model. + """ + config_fname = f"config_{model_name}_{model_version}.json" if model_version is not None else f"config_{model_name}.json" + config_file = os.path.join(ROOT, "models", model_name, config_fname) + if not os.path.exists(config_file): + return None + + with open(config_file, "r") as f: + config = edict(json.load(f)) + + # handle dictionary inheritance + # only training config is supported for inheritance + if "inherit" in config.train and config.train.inherit is not None: + inherit_config = get_model_config(config.train["inherit"]).train + for key, value in inherit_config.items(): + if key not in config.train: + config.train[key] = value + return edict(config) + + +def update_model_config(config, mode, model_name, model_version=None, strict=False): + model_config = get_model_config(model_name, model_version) + if model_config is not None: + config = {**config, ** + flatten({**model_config.model, **model_config[mode]})} + elif strict: + raise ValueError(f"Config file for model {model_name} not found.") + return config + + +def check_choices(name, value, choices): + # return # No checks in dev branch + if value not in choices: + raise ValueError(f"{name} {value} not in supported choices {choices}") + + +KEYS_TYPE_BOOL = ["use_amp", "distributed", "use_shared_dict", "same_lr", "aug", "three_phase", + "prefetch", "cycle_momentum"] # Casting is not necessary as their int casted values in config are 0 or 1 + + +def get_config(model_name, mode='train', dataset=None, **overwrite_kwargs): + """Main entry point to get the config for the model. + + Args: + model_name (str): name of the desired model. + mode (str, optional): "train" or "infer". Defaults to 'train'. 
+ dataset (str, optional): If specified, the corresponding dataset configuration is loaded as well. Defaults to None. + + Keyword Args: key-value pairs of arguments to overwrite the default config. + + The order of precedence for overwriting the config is (Higher precedence first): + # 1. overwrite_kwargs + # 2. "config_version": Config file version if specified in overwrite_kwargs. The corresponding config loaded is config_{model_name}_{config_version}.json + # 3. "version_name": Default Model version specific config specified in overwrite_kwargs. The corresponding config loaded is config_{model_name}_{version_name}.json + # 4. common_config: Default config for all models specified in COMMON_CONFIG + + Returns: + easydict: The config dictionary for the model. + """ + + + check_choices("Model", model_name, ["zoedepth", "zoedepth_nk"]) + check_choices("Mode", mode, ["train", "infer", "eval"]) + if mode == "train": + check_choices("Dataset", dataset, ["nyu", "kitti", "mix", None]) + + config = flatten({**COMMON_CONFIG, **COMMON_TRAINING_CONFIG}) + config = update_model_config(config, mode, model_name) + + # update with model version specific config + version_name = overwrite_kwargs.get("version_name", config["version_name"]) + config = update_model_config(config, mode, model_name, version_name) + + # update with config version if specified + config_version = overwrite_kwargs.get("config_version", None) + if config_version is not None: + print("Overwriting config with config_version", config_version) + config = update_model_config(config, mode, model_name, config_version) + + # update with overwrite_kwargs + # Combined args are useful for hyperparameter search + overwrite_kwargs = split_combined_args(overwrite_kwargs) + config = {**config, **overwrite_kwargs} + + # Casting to bool # TODO: Not necessary. Remove and test + for key in KEYS_TYPE_BOOL: + if key in config: + config[key] = bool(config[key]) + + # Model specific post processing of config + parse_list(config, "n_attractors") + + # adjust n_bins for each bin configuration if bin_conf is given and n_bins is passed in overwrite_kwargs + if 'bin_conf' in config and 'n_bins' in overwrite_kwargs: + bin_conf = config['bin_conf'] # list of dicts + n_bins = overwrite_kwargs['n_bins'] + new_bin_conf = [] + for conf in bin_conf: + conf['n_bins'] = n_bins + new_bin_conf.append(conf) + config['bin_conf'] = new_bin_conf + + if mode == "train": + orig_dataset = dataset + if dataset == "mix": + dataset = 'nyu' # Use nyu as default for mix. 
Dataset config is changed accordingly while loading the dataloader + if dataset is not None: + config['project'] = f"MonoDepth3-{orig_dataset}" # Set project for wandb + + if dataset is not None: + config['dataset'] = dataset + config = {**DATASETS_CONFIG[dataset], **config} + + + config['model'] = model_name + typed_config = {k: infer_type(v) for k, v in config.items()} + # add hostname to config + config['hostname'] = platform.node() + return edict(typed_config) + + +def change_dataset(config, new_dataset): + config.update(DATASETS_CONFIG[new_dataset]) + return config diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/utils/easydict/__init__.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/utils/easydict/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..15928179b0182c6045d98bc0a7be1c6ca45f675e --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/utils/easydict/__init__.py @@ -0,0 +1,158 @@ +""" +EasyDict +Copy/pasted from https://github.com/makinacorpus/easydict +Original author: Mathieu Leplatre +""" + +class EasyDict(dict): + """ + Get attributes + + >>> d = EasyDict({'foo':3}) + >>> d['foo'] + 3 + >>> d.foo + 3 + >>> d.bar + Traceback (most recent call last): + ... + AttributeError: 'EasyDict' object has no attribute 'bar' + + Works recursively + + >>> d = EasyDict({'foo':3, 'bar':{'x':1, 'y':2}}) + >>> isinstance(d.bar, dict) + True + >>> d.bar.x + 1 + + Bullet-proof + + >>> EasyDict({}) + {} + >>> EasyDict(d={}) + {} + >>> EasyDict(None) + {} + >>> d = {'a': 1} + >>> EasyDict(**d) + {'a': 1} + >>> EasyDict((('a', 1), ('b', 2))) + {'a': 1, 'b': 2} + + Set attributes + + >>> d = EasyDict() + >>> d.foo = 3 + >>> d.foo + 3 + >>> d.bar = {'prop': 'value'} + >>> d.bar.prop + 'value' + >>> d + {'foo': 3, 'bar': {'prop': 'value'}} + >>> d.bar.prop = 'newer' + >>> d.bar.prop + 'newer' + + + Values extraction + + >>> d = EasyDict({'foo':0, 'bar':[{'x':1, 'y':2}, {'x':3, 'y':4}]}) + >>> isinstance(d.bar, list) + True + >>> from operator import attrgetter + >>> list(map(attrgetter('x'), d.bar)) + [1, 3] + >>> list(map(attrgetter('y'), d.bar)) + [2, 4] + >>> d = EasyDict() + >>> list(d.keys()) + [] + >>> d = EasyDict(foo=3, bar=dict(x=1, y=2)) + >>> d.foo + 3 + >>> d.bar.x + 1 + + Still like a dict though + + >>> o = EasyDict({'clean':True}) + >>> list(o.items()) + [('clean', True)] + + And like a class + + >>> class Flower(EasyDict): + ... power = 1 + ... + >>> f = Flower() + >>> f.power + 1 + >>> f = Flower({'height': 12}) + >>> f.height + 12 + >>> f['power'] + 1 + >>> sorted(f.keys()) + ['height', 'power'] + + update and pop items + >>> d = EasyDict(a=1, b='2') + >>> e = EasyDict(c=3.0, a=9.0) + >>> d.update(e) + >>> d.c + 3.0 + >>> d['c'] + 3.0 + >>> d.get('c') + 3.0 + >>> d.update(a=4, b=4) + >>> d.b + 4 + >>> d.pop('a') + 4 + >>> d.a + Traceback (most recent call last): + ... 
+ AttributeError: 'EasyDict' object has no attribute 'a' + """ + def __init__(self, d=None, **kwargs): + if d is None: + d = {} + else: + d = dict(d) + if kwargs: + d.update(**kwargs) + for k, v in d.items(): + setattr(self, k, v) + # Class attributes + for k in self.__class__.__dict__.keys(): + if not (k.startswith('__') and k.endswith('__')) and not k in ('update', 'pop'): + setattr(self, k, getattr(self, k)) + + def __setattr__(self, name, value): + if isinstance(value, (list, tuple)): + value = [self.__class__(x) + if isinstance(x, dict) else x for x in value] + elif isinstance(value, dict) and not isinstance(value, self.__class__): + value = self.__class__(value) + super(EasyDict, self).__setattr__(name, value) + super(EasyDict, self).__setitem__(name, value) + + __setitem__ = __setattr__ + + def update(self, e=None, **f): + d = e or dict() + d.update(f) + for k in d: + setattr(self, k, d[k]) + + def pop(self, k, d=None): + delattr(self, k) + return super(EasyDict, self).pop(k, d) + + +if __name__ == "__main__": + import doctest + doctest.testmod() \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/utils/geometry.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/utils/geometry.py new file mode 100644 index 0000000000000000000000000000000000000000..e3da8c75b5a8e39b4b58a4dcd827b84d79b9115c --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/utils/geometry.py @@ -0,0 +1,98 @@ +# MIT License + +# Copyright (c) 2022 Intelligent Systems Lab Org + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +# File author: Shariq Farooq Bhat + +import numpy as np + +def get_intrinsics(H,W): + """ + Intrinsics for a pinhole camera model. + Assume fov of 55 degrees and central principal point. 
+ """ + f = 0.5 * W / np.tan(0.5 * 55 * np.pi / 180.0) + cx = 0.5 * W + cy = 0.5 * H + return np.array([[f, 0, cx], + [0, f, cy], + [0, 0, 1]]) + +def depth_to_points(depth, R=None, t=None): + + K = get_intrinsics(depth.shape[1], depth.shape[2]) + Kinv = np.linalg.inv(K) + if R is None: + R = np.eye(3) + if t is None: + t = np.zeros(3) + + # M converts from your coordinate to PyTorch3D's coordinate system + M = np.eye(3) + M[0, 0] = -1.0 + M[1, 1] = -1.0 + + height, width = depth.shape[1:3] + + x = np.arange(width) + y = np.arange(height) + coord = np.stack(np.meshgrid(x, y), -1) + coord = np.concatenate((coord, np.ones_like(coord)[:, :, [0]]), -1) # z=1 + coord = coord.astype(np.float32) + # coord = torch.as_tensor(coord, dtype=torch.float32, device=device) + coord = coord[None] # bs, h, w, 3 + + D = depth[:, :, :, None, None] + # print(D.shape, Kinv[None, None, None, ...].shape, coord[:, :, :, :, None].shape ) + pts3D_1 = D * Kinv[None, None, None, ...] @ coord[:, :, :, :, None] + # pts3D_1 live in your coordinate system. Convert them to Py3D's + pts3D_1 = M[None, None, None, ...] @ pts3D_1 + # from reference to targe tviewpoint + pts3D_2 = R[None, None, None, ...] @ pts3D_1 + t[None, None, None, :, None] + # pts3D_2 = pts3D_1 + # depth_2 = pts3D_2[:, :, :, 2, :] # b,1,h,w + return pts3D_2[:, :, :, :3, 0][0] + + +def create_triangles(h, w, mask=None): + """ + Reference: https://github.com/google-research/google-research/blob/e96197de06613f1b027d20328e06d69829fa5a89/infinite_nature/render_utils.py#L68 + Creates mesh triangle indices from a given pixel grid size. + This function is not and need not be differentiable as triangle indices are + fixed. + Args: + h: (int) denoting the height of the image. + w: (int) denoting the width of the image. + Returns: + triangles: 2D numpy array of indices (int) with shape (2(W-1)(H-1) x 3) + """ + x, y = np.meshgrid(range(w - 1), range(h - 1)) + tl = y * w + x + tr = y * w + x + 1 + bl = (y + 1) * w + x + br = (y + 1) * w + x + 1 + triangles = np.array([tl, bl, tr, br, tr, bl]) + triangles = np.transpose(triangles, (1, 2, 0)).reshape( + ((w - 1) * (h - 1) * 2, 3)) + if mask is not None: + mask = mask.reshape(-1) + triangles = triangles[mask[triangles].all(1)] + return triangles diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/utils/misc.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/utils/misc.py new file mode 100644 index 0000000000000000000000000000000000000000..4bbe403d3669829eecdf658458c76aa5e87e2b33 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/src/zoedepth/utils/misc.py @@ -0,0 +1,368 @@ +# MIT License + +# Copyright (c) 2022 Intelligent Systems Lab Org + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +# File author: Shariq Farooq Bhat + +"""Miscellaneous utility functions.""" + +from scipy import ndimage + +import base64 +import math +import re +from io import BytesIO + +import matplotlib +import matplotlib.cm +import numpy as np +import requests +import torch +import torch.distributed as dist +import torch.nn +import torch.nn as nn +import torch.utils.data.distributed +from PIL import Image +from torchvision.transforms import ToTensor + + +class RunningAverage: + def __init__(self): + self.avg = 0 + self.count = 0 + + def append(self, value): + self.avg = (value + self.count * self.avg) / (self.count + 1) + self.count += 1 + + def get_value(self): + return self.avg + + +def denormalize(x): + """Reverses the imagenet normalization applied to the input. + + Args: + x (torch.Tensor - shape(N,3,H,W)): input tensor + + Returns: + torch.Tensor - shape(N,3,H,W): Denormalized input + """ + mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1).to(x.device) + std = torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1).to(x.device) + return x * std + mean + + +class RunningAverageDict: + """A dictionary of running averages.""" + def __init__(self): + self._dict = None + + def update(self, new_dict): + if new_dict is None: + return + + if self._dict is None: + self._dict = dict() + for key, value in new_dict.items(): + self._dict[key] = RunningAverage() + + for key, value in new_dict.items(): + self._dict[key].append(value) + + def get_value(self): + if self._dict is None: + return None + return {key: value.get_value() for key, value in self._dict.items()} + + +def colorize(value, vmin=None, vmax=None, cmap='gray_r', invalid_val=-99, invalid_mask=None, background_color=(128, 128, 128, 255), gamma_corrected=False, value_transform=None): + """Converts a depth map to a color image. + + Args: + value (torch.Tensor, numpy.ndarry): Input depth map. Shape: (H, W) or (1, H, W) or (1, 1, H, W). All singular dimensions are squeezed + vmin (float, optional): vmin-valued entries are mapped to start color of cmap. If None, value.min() is used. Defaults to None. + vmax (float, optional): vmax-valued entries are mapped to end color of cmap. If None, value.max() is used. Defaults to None. + cmap (str, optional): matplotlib colormap to use. Defaults to 'magma_r'. + invalid_val (int, optional): Specifies value of invalid pixels that should be colored as 'background_color'. Defaults to -99. + invalid_mask (numpy.ndarray, optional): Boolean mask for invalid regions. Defaults to None. + background_color (tuple[int], optional): 4-tuple RGB color to give to invalid pixels. Defaults to (128, 128, 128, 255). + gamma_corrected (bool, optional): Apply gamma correction to colored image. Defaults to False. + value_transform (Callable, optional): Apply transform function to valid pixels before coloring. Defaults to None. + + Returns: + numpy.ndarray, dtype - uint8: Colored depth map. 
Shape: (H, W, 4) + """ + if isinstance(value, torch.Tensor): + value = value.detach().cpu().numpy() + + value = value.squeeze() + if invalid_mask is None: + invalid_mask = value == invalid_val + mask = np.logical_not(invalid_mask) + + # normalize + vmin = np.percentile(value[mask],2) if vmin is None else vmin + vmax = np.percentile(value[mask],85) if vmax is None else vmax + if vmin != vmax: + value = (value - vmin) / (vmax - vmin) # vmin..vmax + else: + # Avoid 0-division + value = value * 0. + + # squeeze last dim if it exists + # grey out the invalid values + + value[invalid_mask] = np.nan + cmapper = matplotlib.cm.get_cmap(cmap) + if value_transform: + value = value_transform(value) + # value = value / value.max() + value = cmapper(value, bytes=True) # (nxmx4) + + # img = value[:, :, :] + img = value[...] + img[invalid_mask] = background_color + + # return img.transpose((2, 0, 1)) + if gamma_corrected: + # gamma correction + img = img / 255 + img = np.power(img, 2.2) + img = img * 255 + img = img.astype(np.uint8) + return img + + +def count_parameters(model, include_all=False): + return sum(p.numel() for p in model.parameters() if p.requires_grad or include_all) + + +def compute_errors(gt, pred): + """Compute metrics for 'pred' compared to 'gt' + + Args: + gt (numpy.ndarray): Ground truth values + pred (numpy.ndarray): Predicted values + + gt.shape should be equal to pred.shape + + Returns: + dict: Dictionary containing the following metrics: + 'a1': Delta1 accuracy: Fraction of pixels that are within a scale factor of 1.25 + 'a2': Delta2 accuracy: Fraction of pixels that are within a scale factor of 1.25^2 + 'a3': Delta3 accuracy: Fraction of pixels that are within a scale factor of 1.25^3 + 'abs_rel': Absolute relative error + 'rmse': Root mean squared error + 'log_10': Absolute log10 error + 'sq_rel': Squared relative error + 'rmse_log': Root mean squared error on the log scale + 'silog': Scale invariant log error + """ + thresh = np.maximum((gt / pred), (pred / gt)) + a1 = (thresh < 1.25).mean() + a2 = (thresh < 1.25 ** 2).mean() + a3 = (thresh < 1.25 ** 3).mean() + + abs_rel = np.mean(np.abs(gt - pred) / gt) + sq_rel = np.mean(((gt - pred) ** 2) / gt) + + rmse = (gt - pred) ** 2 + rmse = np.sqrt(rmse.mean()) + + rmse_log = (np.log(gt) - np.log(pred)) ** 2 + rmse_log = np.sqrt(rmse_log.mean()) + + err = np.log(pred) - np.log(gt) + silog = np.sqrt(np.mean(err ** 2) - np.mean(err) ** 2) * 100 + + log_10 = (np.abs(np.log10(gt) - np.log10(pred))).mean() + return dict(a1=a1, a2=a2, a3=a3, abs_rel=abs_rel, rmse=rmse, log_10=log_10, rmse_log=rmse_log, + silog=silog, sq_rel=sq_rel) + + +def compute_metrics(gt, pred, interpolate=True, garg_crop=False, eigen_crop=True, dataset='nyu', min_depth_eval=0.1, max_depth_eval=10, **kwargs): + """Compute metrics of predicted depth maps. Applies cropping and masking as necessary or specified via arguments. Refer to compute_errors for more details on metrics. 
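# Hand-worked sanity check of the metric definitions in compute_errors above
# (the two-pixel arrays are made up purely for illustration):
import numpy as np
gt = np.array([2.0, 4.0])
pred = np.array([2.2, 3.0])
thresh = np.maximum(gt / pred, pred / gt)   # [1.10, 1.33]
a1 = (thresh < 1.25).mean()                 # 0.5 -> only the first pixel is within 1.25x
abs_rel = np.mean(np.abs(gt - pred) / gt)   # (0.10 + 0.25) / 2 = 0.175
rmse = np.sqrt(np.mean((gt - pred) ** 2))   # sqrt((0.04 + 1.00) / 2) ~ 0.721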
+ """ + if 'config' in kwargs: + config = kwargs['config'] + garg_crop = config.garg_crop + eigen_crop = config.eigen_crop + min_depth_eval = config.min_depth_eval + max_depth_eval = config.max_depth_eval + + if gt.shape[-2:] != pred.shape[-2:] and interpolate: + pred = nn.functional.interpolate( + pred, gt.shape[-2:], mode='bilinear', align_corners=True) + + pred = pred.squeeze().cpu().numpy() + pred[pred < min_depth_eval] = min_depth_eval + pred[pred > max_depth_eval] = max_depth_eval + pred[np.isinf(pred)] = max_depth_eval + pred[np.isnan(pred)] = min_depth_eval + + gt_depth = gt.squeeze().cpu().numpy() + valid_mask = np.logical_and( + gt_depth > min_depth_eval, gt_depth < max_depth_eval) + + if garg_crop or eigen_crop: + gt_height, gt_width = gt_depth.shape + eval_mask = np.zeros(valid_mask.shape) + + if garg_crop: + eval_mask[int(0.40810811 * gt_height):int(0.99189189 * gt_height), + int(0.03594771 * gt_width):int(0.96405229 * gt_width)] = 1 + + elif eigen_crop: + # print("-"*10, " EIGEN CROP ", "-"*10) + if dataset == 'kitti': + eval_mask[int(0.3324324 * gt_height):int(0.91351351 * gt_height), + int(0.0359477 * gt_width):int(0.96405229 * gt_width)] = 1 + else: + # assert gt_depth.shape == (480, 640), "Error: Eigen crop is currently only valid for (480, 640) images" + eval_mask[45:471, 41:601] = 1 + else: + eval_mask = np.ones(valid_mask.shape) + valid_mask = np.logical_and(valid_mask, eval_mask) + return compute_errors(gt_depth[valid_mask], pred[valid_mask]) + + +#################################### Model uilts ################################################ + + +def parallelize(config, model, find_unused_parameters=True): + + if config.gpu is not None: + torch.cuda.set_device(config.gpu) + model = model.cuda(config.gpu) + + config.multigpu = False + if config.distributed: + # Use DDP + config.multigpu = True + config.rank = config.rank * config.ngpus_per_node + config.gpu + dist.init_process_group(backend=config.dist_backend, init_method=config.dist_url, + world_size=config.world_size, rank=config.rank) + config.batch_size = int(config.batch_size / config.ngpus_per_node) + # config.batch_size = 8 + config.workers = int( + (config.num_workers + config.ngpus_per_node - 1) / config.ngpus_per_node) + print("Device", config.gpu, "Rank", config.rank, "batch size", + config.batch_size, "Workers", config.workers) + torch.cuda.set_device(config.gpu) + model = nn.SyncBatchNorm.convert_sync_batchnorm(model) + model = model.cuda(config.gpu) + model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[config.gpu], output_device=config.gpu, + find_unused_parameters=find_unused_parameters) + + elif config.gpu is None: + # Use DP + config.multigpu = True + model = model.cuda() + model = torch.nn.DataParallel(model) + + return model + + +################################################################################################# + + +##################################################################################################### + + +class colors: + '''Colors class: + Reset all colors with colors.reset + Two subclasses fg for foreground and bg for background. + Use as colors.subclass.colorname. + i.e. colors.fg.red or colors.bg.green + Also, the generic bold, disable, underline, reverse, strikethrough, + and invisible work with the main class + i.e. 
colors.bold + ''' + reset = '\033[0m' + bold = '\033[01m' + disable = '\033[02m' + underline = '\033[04m' + reverse = '\033[07m' + strikethrough = '\033[09m' + invisible = '\033[08m' + + class fg: + black = '\033[30m' + red = '\033[31m' + green = '\033[32m' + orange = '\033[33m' + blue = '\033[34m' + purple = '\033[35m' + cyan = '\033[36m' + lightgrey = '\033[37m' + darkgrey = '\033[90m' + lightred = '\033[91m' + lightgreen = '\033[92m' + yellow = '\033[93m' + lightblue = '\033[94m' + pink = '\033[95m' + lightcyan = '\033[96m' + + class bg: + black = '\033[40m' + red = '\033[41m' + green = '\033[42m' + orange = '\033[43m' + blue = '\033[44m' + purple = '\033[45m' + cyan = '\033[46m' + lightgrey = '\033[47m' + + +def printc(text, color): + print(f"{color}{text}{colors.reset}") + +############################################ + +def get_image_from_url(url): + response = requests.get(url) + img = Image.open(BytesIO(response.content)).convert("RGB") + return img + +def url_to_torch(url, size=(384, 384)): + img = get_image_from_url(url) + img = img.resize(size, Image.ANTIALIAS) + img = torch.from_numpy(np.asarray(img)).float() + img = img.permute(2, 0, 1) + img.div_(255) + return img + +def pil_to_batched_tensor(img): + return ToTensor()(img).unsqueeze(0) + +def save_raw_16bit(depth, fpath="raw.png"): + if isinstance(depth, torch.Tensor): + depth = depth.squeeze().cpu().numpy() + + assert isinstance(depth, np.ndarray), "Depth must be a torch tensor or numpy array" + assert depth.ndim == 2, "Depth must be 2D" + depth = depth * 256 # scale for 16-bit png + depth = depth.astype(np.uint16) + depth = Image.fromarray(depth) + depth.save(fpath) + print("Saved raw depth to", fpath) \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/subtitle_handler.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/subtitle_handler.py new file mode 100644 index 0000000000000000000000000000000000000000..6d004e1089ad1e3cecfd5f9dfbac6347215d1069 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/subtitle_handler.py @@ -0,0 +1,113 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
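Tying together the depth I/O helpers defined in misc.py above, a small usage sketch. It assumes the zoedepth package directory is on sys.path and that torch/matplotlib are installed; the random array is only a stand-in for real model output:

import numpy as np
from zoedepth.utils.misc import colorize, save_raw_16bit

depth = np.random.uniform(0.5, 10.0, size=(480, 640)).astype(np.float32)  # fake depth in meters
save_raw_16bit(depth, "raw.png")            # stores depth * 256 as a 16-bit PNG
colored = colorize(depth, cmap="magma_r")   # (H, W, 4) uint8 visualization; invalid pixels get the grey background
print(colored.shape, colored.dtype)         # (480, 640, 4) uint8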
+ +# Contact the authors: https://deforum.github.io/ + +from decimal import Decimal, getcontext + +param_dict = { + "angle": {"backend": "angle_series", "user": "Angle", "print": "Angle"}, + "transform_center_x": {"backend": "transform_center_x_series", "user": "Trans Center X", "print": "Tr.C.X"}, + "transform_center_y": {"backend": "transform_center_y_series", "user": "Trans Center Y", "print": "Tr.C.Y"}, + "zoom": {"backend": "zoom_series", "user": "Zoom", "print": "Zoom"}, + "translation_x": {"backend": "translation_x_series", "user": "Trans X", "print": "TrX"}, + "translation_y": {"backend": "translation_y_series", "user": "Trans Y", "print": "TrY"}, + "translation_z": {"backend": "translation_z_series", "user": "Trans Z", "print": "TrZ"}, + "rotation_3d_x": {"backend": "rotation_3d_x_series", "user": "Rot 3D X", "print": "RotX"}, + "rotation_3d_y": {"backend": "rotation_3d_y_series", "user": "Rot 3D Y", "print": "RotY"}, + "rotation_3d_z": {"backend": "rotation_3d_z_series", "user": "Rot 3D Z", "print": "RotZ"}, + "perspective_flip_theta": {"backend": "perspective_flip_theta_series", "user": "Per Fl Theta", "print": "PerFlT"}, + "perspective_flip_phi": {"backend": "perspective_flip_phi_series", "user": "Per Fl Phi", "print": "PerFlP"}, + "perspective_flip_gamma": {"backend": "perspective_flip_gamma_series", "user": "Per Fl Gamma", "print": "PerFlG"}, + "perspective_flip_fv": {"backend": "perspective_flip_fv_series", "user": "Per Fl FV", "print": "PerFlFV"}, + "noise_schedule": {"backend": "noise_schedule_series", "user": "Noise Sch", "print": "Noise"}, + "strength_schedule": {"backend": "strength_schedule_series", "user": "Str Sch", "print": "StrSch"}, + "contrast_schedule": {"backend": "contrast_schedule_series", "user": "Contrast Sch", "print": "CtrstSch"}, + "cfg_scale_schedule": {"backend": "cfg_scale_schedule_series", "user": "CFG Sch", "print": "CFGSch"}, + "pix2pix_img_cfg_scale_schedule": {"backend": "pix2pix_img_cfg_scale_series", "user": "P2P Img CFG Sch", "print": "P2PCfgSch"}, + "subseed_schedule": {"backend": "subseed_schedule_series", "user": "Subseed Sch", "print": "SubSSch"}, + "subseed_strength_schedule": {"backend": "subseed_strength_schedule_series", "user": "Subseed Str Sch", "print": "SubSStrSch"}, + "checkpoint_schedule": {"backend": "checkpoint_schedule_series", "user": "Ckpt Sch", "print": "CkptSch"}, + "steps_schedule": {"backend": "steps_schedule_series", "user": "Steps Sch", "print": "StepsSch"}, + "seed_schedule": {"backend": "seed_schedule_series", "user": "Seed Sch", "print": "SeedSch"}, + "sampler_schedule": {"backend": "sampler_schedule_series", "user": "Sampler Sch", "print": "SamplerSchedule"}, + "clipskip_schedule": {"backend": "clipskip_schedule_series", "user": "Clipskip Sch", "print": "ClipskipSchedule"}, + "noise_multiplier_schedule": {"backend": "noise_multiplier_schedule_series", "user": "Noise Multp Sch", "print": "NoiseMultiplierSchedule"}, + "mask_schedule": {"backend": "mask_schedule_series", "user": "Mask Sch", "print": "MaskSchedule"}, + "noise_mask_schedule": {"backend": "noise_mask_schedule_series", "user": "Noise Mask Sch", "print": "NoiseMaskSchedule"}, + "amount_schedule": {"backend": "amount_schedule_series", "user": "Ant.Blr Amount Sch", "print": "AmountSchedule"}, + "kernel_schedule": {"backend": "kernel_schedule_series", "user": "Ant.Blr Kernel Sch", "print": "KernelSchedule"}, + "sigma_schedule": {"backend": "sigma_schedule_series", "user": "Ant.Blr Sigma Sch", "print": "SigmaSchedule"}, + "threshold_schedule": {"backend": 
"threshold_schedule_series", "user": "Ant.Blr Threshold Sch", "print": "ThresholdSchedule"}, + "aspect_ratio_schedule": {"backend": "aspect_ratio_series", "user": "Aspect Ratio Sch", "print": "AspectRatioSchedule"}, + "fov_schedule": {"backend": "fov_series", "user": "FOV Sch", "print": "FieldOfViewSchedule"}, + "near_schedule": {"backend": "near_series", "user": "Near Sch", "print": "NearSchedule"}, + "cadence_flow_factor_schedule": {"backend": "cadence_flow_factor_schedule_series", "user": "Cadence Flow Factor Sch", "print": "CadenceFlowFactorSchedule"}, + "redo_flow_factor_schedule": {"backend": "redo_flow_factor_schedule_series", "user": "Redo Flow Factor Sch", "print": "RedoFlowFactorSchedule"}, + "far_schedule": {"backend": "far_series", "user": "Far Sch", "print": "FarSchedule"}, + "hybrid_comp_alpha_schedule": {"backend": "hybrid_comp_alpha_schedule_series", "user": "Hyb Comp Alpha Sch", "print": "HybridCompAlphaSchedule"}, + "hybrid_comp_mask_blend_alpha_schedule": {"backend": "hybrid_comp_mask_blend_alpha_schedule_series", "user": "Hyb Comp Mask Blend Alpha Sch", "print": "HybridCompMaskBlendAlphaSchedule"}, + "hybrid_comp_mask_contrast_schedule": {"backend": "hybrid_comp_mask_contrast_schedule_series", "user": "Hyb Comp Mask Ctrst Sch", "print": "HybridCompMaskContrastSchedule"}, + "hybrid_comp_mask_auto_contrast_cutoff_high_schedule": {"backend": "hybrid_comp_mask_auto_contrast_cutoff_high_schedule_series", "user": "Hyb Comp Mask Auto Contrast Cutoff High Sch", "print": "HybridCompMaskAutoContrastCutoffHighSchedule"}, + "hybrid_comp_mask_auto_contrast_cutoff_low_schedule": {"backend": "hybrid_comp_mask_auto_contrast_cutoff_low_schedule_series", "user": "Hyb Comp Mask Auto Ctrst Cut Low Sch", "print": "HybridCompMaskAutoContrastCutoffLowSchedule"}, + "hybrid_flow_factor_schedule": {"backend": "hybrid_flow_factor_schedule_series", "user": "Hybrid Flow Factor Sch", "print": "HybridFlowFactorSchedule"}, +} + +def time_to_srt_format(seconds): + hours, remainder = divmod(seconds, 3600) + minutes, remainder = divmod(remainder, 60) + seconds, milliseconds = divmod(remainder, 1) + return f"{hours:02}:{minutes:02}:{int(seconds):02},{int(milliseconds * 1000):03}" + +def init_srt_file(filename, fps, precision=20): + with open(filename, "w") as f: + pass + getcontext().prec = precision + frame_duration = Decimal(1) / Decimal(fps) + return frame_duration + +def write_frame_subtitle(filename, frame_number, frame_duration, text): + frame_start_time = Decimal(frame_number) * frame_duration + frame_end_time = (Decimal(frame_number) + Decimal(1)) * frame_duration + + with open(filename, "a") as f: + f.write(f"{frame_number + 1}\n") + f.write(f"{time_to_srt_format(frame_start_time)} --> {time_to_srt_format(frame_end_time)}\n") + f.write(f"{text}\n\n") + +def format_animation_params(keys, prompt_series, frame_idx, params_to_print): + params_string = "" + for key, value in param_dict.items(): + if value['user'] in params_to_print: + backend_key = value['backend'] + print_key = value['print'] + param_value = getattr(keys, backend_key)[frame_idx] + if isinstance(param_value, float) and param_value == int(param_value): + formatted_value = str(int(param_value)) + elif isinstance(param_value, float) and not param_value.is_integer(): + formatted_value = f"{param_value:.3f}" + else: + formatted_value = f"{param_value}" + params_string += f"{print_key}: {formatted_value}; " + + if "Prompt" in params_to_print: + params_string += f"Prompt: {prompt_series[frame_idx]}; " + + params_string = 
params_string.rstrip("; ") # Remove trailing semicolon and whitespace + return params_string + +def get_user_values(): + items = [v["user"] for v in param_dict.values()] + items.append("Prompt") + return items \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/ui_elements.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/ui_elements.py new file mode 100644 index 0000000000000000000000000000000000000000..a3b598adafefdc89e6941a59777f93a06f99640c --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/ui_elements.py @@ -0,0 +1,550 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +# Contact the authors: https://deforum.github.io/ + +import gradio as gr +from modules.ui_components import FormRow, FormColumn +from .defaults import get_gradio_html, DeforumAnimPrompts +from .video_audio_utilities import direct_stitch_vid_from_frames +from .gradio_funcs import upload_vid_to_interpolate, upload_pics_to_interpolate, ncnn_upload_vid_to_upscale, upload_vid_to_depth + +def create_gr_elem(d): + # Capitalize and CamelCase the orig value under "type", which defines gr.inputs.type in lower_case. + # Examples: "dropdown" becomes gr.Dropdown, and "checkbox_group" becomes gr.CheckboxGroup. + obj_type_str = ''.join(word.title() for word in d["type"].split('_')) + obj_type = getattr(gr, obj_type_str) + + # Prepare parameters for gradio element creation + params = {k: v for k, v in d.items() if k != "type" and v is not None} + + # Special case: Since some elements can have 'type' parameter and we are already using 'type' to specify + # which element to use we need a separate parameter that will be used to overwrite 'type' at this point. + # E.g. for Radio element we should specify 'type_param' which is then used to set gr.radio's type. 
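    # Quick illustration of the name mapping described above (the dicts are hypothetical
    # stand-ins, not actual entries from args.py):
    #   {"type": "dropdown", "label": "Sampler", "choices": ["Euler a", "DDIM"]}
    #       -> obj_type_str == "Dropdown"      -> gr.Dropdown(label="Sampler", choices=["Euler a", "DDIM"])
    #   {"type": "checkbox_group", "label": "Params", "choices": ["Zoom", "Angle"]}
    #       -> obj_type_str == "CheckboxGroup" -> gr.CheckboxGroup(label="Params", choices=["Zoom", "Angle"])
    #   A dict carrying "type_param" gets that value swapped back into gradio's own 'type' kwarg just below.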
+ if 'type_param' in params: + params['type'] = params.pop('type_param') + + return obj_type(**params) + +# ******** Important message ******** +# All get_tab functions use FormRow()/ FormColumn() by default, unless we have a gr.File inside that row/column, then we use gr.Row()/gr.Column() instead +# ******** Important message ******** +def get_tab_run(d, da): + with gr.TabItem('Run'): # RUN TAB + with FormRow(): + motion_preview_mode = create_gr_elem(d.motion_preview_mode) + with FormRow(): + sampler = create_gr_elem(d.sampler) + steps = create_gr_elem(d.steps) + with FormRow(): + W = create_gr_elem(d.W) + H = create_gr_elem(d.H) + with FormRow(): + seed = create_gr_elem(d.seed) + batch_name = create_gr_elem(d.batch_name) + with FormRow(): + restore_faces = create_gr_elem(d.restore_faces) + tiling = create_gr_elem(d.tiling) + enable_ddim_eta_scheduling = create_gr_elem(da.enable_ddim_eta_scheduling) + enable_ancestral_eta_scheduling = create_gr_elem(da.enable_ancestral_eta_scheduling) + with gr.Row(variant='compact') as eta_sch_row: + ddim_eta_schedule = create_gr_elem(da.ddim_eta_schedule) + ancestral_eta_schedule = create_gr_elem(da.ancestral_eta_schedule) + # RUN FROM SETTING FILE ACCORD + with gr.Accordion('Batch Mode, Resume and more', open=False): + with gr.Tab('Batch Mode/ run from setting files'): + with gr.Row(): # TODO: handle this inside one of the args functions? + override_settings_with_file = gr.Checkbox(label="Enable batch mode", value=False, interactive=True, elem_id='override_settings', + info="run from a list of setting .txt files. Upload them to the box on the right (visible when enabled)") + custom_settings_file = gr.File(label="Setting files", interactive=True, file_count="multiple", file_types=[".txt"], elem_id="custom_setting_file", visible=False) + # RESUME ANIMATION ACCORD + with gr.Tab('Resume Animation'): + with FormRow(): + resume_from_timestring = create_gr_elem(da.resume_from_timestring) + resume_timestring = create_gr_elem(da.resume_timestring) + with gr.Row(variant='compact') as pix2pix_img_cfg_scale_row: + pix2pix_img_cfg_scale_schedule = create_gr_elem(da.pix2pix_img_cfg_scale_schedule) + return {k: v for k, v in {**locals(), **vars()}.items()} + +def get_tab_keyframes(d, da, dloopArgs): + with gr.TabItem('Keyframes'): # TODO make a some sort of the original dictionary parsing + with FormRow(): + with FormColumn(scale=2): + animation_mode = create_gr_elem(da.animation_mode) + with FormColumn(scale=1, min_width=180): + border = create_gr_elem(da.border) + with FormRow(): + diffusion_cadence = create_gr_elem(da.diffusion_cadence) + max_frames = create_gr_elem(da.max_frames) + # GUIDED IMAGES ACCORD + with gr.Accordion('Guided Images', open=False, elem_id='guided_images_accord') as guided_images_accord: + # GUIDED IMAGES INFO ACCORD + with gr.Accordion('*READ ME before you use this mode!*', open=False): + gr.HTML(value=get_gradio_html('guided_imgs')) + with FormRow(): + use_looper = create_gr_elem(dloopArgs.use_looper) + with FormRow(): + init_images = create_gr_elem(dloopArgs.init_images) + # GUIDED IMAGES SCHEDULES ACCORD + with gr.Accordion('Guided images schedules', open=False): + with FormRow(): + image_strength_schedule = create_gr_elem(dloopArgs.image_strength_schedule) + with FormRow(): + blendFactorMax = create_gr_elem(dloopArgs.blendFactorMax) + with FormRow(): + blendFactorSlope = create_gr_elem(dloopArgs.blendFactorSlope) + with FormRow(): + tweening_frames_schedule = create_gr_elem(dloopArgs.tweening_frames_schedule) + with FormRow(): + 
color_correction_factor = create_gr_elem(dloopArgs.color_correction_factor) + # EXTRA SCHEDULES TABS + with gr.Tabs(): + with gr.TabItem('Strength'): + with FormRow(): + strength_schedule = create_gr_elem(da.strength_schedule) + with gr.TabItem('CFG'): + with FormRow(): + cfg_scale_schedule = create_gr_elem(da.cfg_scale_schedule) + with FormRow(): + enable_clipskip_scheduling = create_gr_elem(da.enable_clipskip_scheduling) + with FormRow(): + clipskip_schedule = create_gr_elem(da.clipskip_schedule) + with gr.TabItem('Seed'): + with FormRow(): + seed_behavior = create_gr_elem(d.seed_behavior) + with FormRow() as seed_iter_N_row: + seed_iter_N = create_gr_elem(d.seed_iter_N) + with FormRow(visible=False) as seed_schedule_row: + seed_schedule = create_gr_elem(da.seed_schedule) + with gr.TabItem('SubSeed', open=False) as subseed_sch_tab: + with FormRow(): + enable_subseed_scheduling = create_gr_elem(da.enable_subseed_scheduling) + subseed_schedule = create_gr_elem(da.subseed_schedule) + subseed_strength_schedule = create_gr_elem(da.subseed_strength_schedule) + with FormRow(): + seed_resize_from_w = create_gr_elem(d.seed_resize_from_w) + seed_resize_from_h = create_gr_elem(d.seed_resize_from_h) + # Steps Scheduling + with gr.TabItem('Step'): + with FormRow(): + enable_steps_scheduling = create_gr_elem(da.enable_steps_scheduling) + with FormRow(): + steps_schedule = create_gr_elem(da.steps_schedule) + # Sampler Scheduling + with gr.TabItem('Sampler'): + with FormRow(): + enable_sampler_scheduling = create_gr_elem(da.enable_sampler_scheduling) + with FormRow(): + sampler_schedule = create_gr_elem(da.sampler_schedule) + # Checkpoint Scheduling + with gr.TabItem('Checkpoint'): + with FormRow(): + enable_checkpoint_scheduling = create_gr_elem(da.enable_checkpoint_scheduling) + with FormRow(): + checkpoint_schedule = create_gr_elem(da.checkpoint_schedule) + # MOTION INNER TAB + with gr.Tabs(elem_id='motion_noise_etc'): + with gr.TabItem('Motion') as motion_tab: + with FormColumn() as only_2d_motion_column: + with FormRow(): + zoom = create_gr_elem(da.zoom) + with FormRow(): + angle = create_gr_elem(da.angle) + with FormRow(): + transform_center_x = create_gr_elem(da.transform_center_x) + with FormRow(): + transform_center_y = create_gr_elem(da.transform_center_y) + with FormColumn() as both_anim_mode_motion_params_column: + with FormRow(): + translation_x = create_gr_elem(da.translation_x) + with FormRow(): + translation_y = create_gr_elem(da.translation_y) + with FormColumn(visible=False) as only_3d_motion_column: + with FormRow(): + translation_z = create_gr_elem(da.translation_z) + with FormRow(): + rotation_3d_x = create_gr_elem(da.rotation_3d_x) + with FormRow(): + rotation_3d_y = create_gr_elem(da.rotation_3d_y) + with FormRow(): + rotation_3d_z = create_gr_elem(da.rotation_3d_z) + # PERSPECTIVE FLIP - inner params are hidden if not enabled + with FormRow() as enable_per_f_row: + enable_perspective_flip = create_gr_elem(da.enable_perspective_flip) + with FormRow(visible=False) as per_f_th_row: + perspective_flip_theta = create_gr_elem(da.perspective_flip_theta) + with FormRow(visible=False) as per_f_ph_row: + perspective_flip_phi = create_gr_elem(da.perspective_flip_phi) + with FormRow(visible=False) as per_f_ga_row: + perspective_flip_gamma = create_gr_elem(da.perspective_flip_gamma) + with FormRow(visible=False) as per_f_f_row: + perspective_flip_fv = create_gr_elem(da.perspective_flip_fv) + # NOISE INNER TAB + with gr.TabItem('Noise'): + with FormColumn() as noise_tab_column: + with 
FormRow(): + noise_type = create_gr_elem(da.noise_type) + with FormRow(): + noise_schedule = create_gr_elem(da.noise_schedule) + with FormRow() as perlin_row: + with FormColumn(min_width=220): + perlin_octaves = create_gr_elem(da.perlin_octaves) + with FormColumn(min_width=220): + perlin_persistence = create_gr_elem(da.perlin_persistence) + # following two params are INVISIBLE IN UI as of 21-05-23 + perlin_w = create_gr_elem(da.perlin_w) + perlin_h = create_gr_elem(da.perlin_h) + with FormRow(): + enable_noise_multiplier_scheduling = create_gr_elem(da.enable_noise_multiplier_scheduling) + with FormRow(): + noise_multiplier_schedule = create_gr_elem(da.noise_multiplier_schedule) + # COHERENCE INNER TAB + with gr.TabItem('Coherence', open=False) as coherence_accord: + with FormRow(): + color_coherence = create_gr_elem(da.color_coherence) + color_force_grayscale = create_gr_elem(da.color_force_grayscale) + with FormRow(): + legacy_colormatch = create_gr_elem(da.legacy_colormatch) + with FormRow(visible=False) as color_coherence_image_path_row: + color_coherence_image_path = create_gr_elem(da.color_coherence_image_path) + with FormRow(visible=False) as color_coherence_video_every_N_frames_row: + color_coherence_video_every_N_frames = create_gr_elem(da.color_coherence_video_every_N_frames) + with FormRow() as optical_flow_cadence_row: + with FormColumn(min_width=220) as optical_flow_cadence_column: + optical_flow_cadence = create_gr_elem(da.optical_flow_cadence) + with FormColumn(min_width=220, visible=False) as cadence_flow_factor_schedule_column: + cadence_flow_factor_schedule = create_gr_elem(da.cadence_flow_factor_schedule) + with FormRow(): + with FormColumn(min_width=220): + optical_flow_redo_generation = create_gr_elem(da.optical_flow_redo_generation) + with FormColumn(min_width=220, visible=False) as redo_flow_factor_schedule_column: + redo_flow_factor_schedule = create_gr_elem(da.redo_flow_factor_schedule) + with FormRow(): + contrast_schedule = gr.Textbox(label="Contrast schedule", lines=1, value=da.contrast_schedule, interactive=True, + info="adjusts the overall contrast per frame [neutral at 1.0, recommended to *not* play with this param]") + diffusion_redo = gr.Slider(label="Redo generation", minimum=0, maximum=50, step=1, value=da.diffusion_redo, interactive=True, + info="this option renders N times before the final render. it is suggested to lower your steps if you up your redo. 
seed is randomized during redo generations and restored afterwards") + with FormRow(): + # what to do with blank frames (they may result from glitches or the NSFW filter being turned on): reroll with +1 seed, interrupt the animation generation, or do nothing + reroll_blank_frames = create_gr_elem(d.reroll_blank_frames) + reroll_patience = create_gr_elem(d.reroll_patience) + # ANTI BLUR INNER TAB + with gr.TabItem('Anti Blur', elem_id='anti_blur_accord') as anti_blur_tab: + with FormRow(): + amount_schedule = create_gr_elem(da.amount_schedule) + with FormRow(): + kernel_schedule = create_gr_elem(da.kernel_schedule) + with FormRow(): + sigma_schedule = create_gr_elem(da.sigma_schedule) + with FormRow(): + threshold_schedule = create_gr_elem(da.threshold_schedule) + with gr.TabItem('Depth Warping & FOV', elem_id='depth_warp_fov_tab') as depth_warp_fov_tab: + # this html only shows when not in 2d/3d mode + depth_warp_msg_html = gr.HTML(value='Please switch to 3D animation mode to view this section.', elem_id='depth_warp_msg_html') + with FormRow(visible=False) as depth_warp_row_1: + use_depth_warping = create_gr_elem(da.use_depth_warping) + # *the following html only shows when LeReS depth is selected* + leres_license_msg = gr.HTML(value=get_gradio_html('leres'), visible=False, elem_id='leres_license_msg') + depth_algorithm = create_gr_elem(da.depth_algorithm) + midas_weight = create_gr_elem(da.midas_weight) + with FormRow(visible=False) as depth_warp_row_2: + padding_mode = create_gr_elem(da.padding_mode) + sampling_mode = create_gr_elem(da.sampling_mode) + with FormRow(visible=False) as depth_warp_row_3: + aspect_ratio_use_old_formula = create_gr_elem(da.aspect_ratio_use_old_formula) + with FormRow(visible=False) as depth_warp_row_4: + aspect_ratio_schedule = create_gr_elem(da.aspect_ratio_schedule) + with FormRow(visible=False) as depth_warp_row_5: + fov_schedule = create_gr_elem(da.fov_schedule) + with FormRow(visible=False) as depth_warp_row_6: + near_schedule = create_gr_elem(da.near_schedule) + with FormRow(visible=False) as depth_warp_row_7: + far_schedule = create_gr_elem(da.far_schedule) + + return {k: v for k, v in {**locals(), **vars()}.items()} + +def get_tab_prompts(da): + with gr.TabItem('Prompts'): + # PROMPTS INFO ACCORD + with gr.Accordion(label='*Important* notes on Prompts', elem_id='prompts_info_accord', open=False) as prompts_info_accord: + gr.HTML(value=get_gradio_html('prompts')) + with FormRow(): + animation_prompts = gr.Textbox(label="Prompts", lines=8, interactive=True, value=DeforumAnimPrompts(), + info="full prompts list in a JSON format. 
value on left side is the frame number") + with FormRow(): + animation_prompts_positive = gr.Textbox(label="Prompts positive", lines=1, interactive=True, placeholder="words in here will be added to the start of all positive prompts") + with FormRow(): + animation_prompts_negative = gr.Textbox(label="Prompts negative", value="nsfw, nude", lines=1, interactive=True, + placeholder="words in here will be added to the end of all negative prompts") + # COMPOSABLE MASK SCHEDULING ACCORD + with gr.Accordion('Composable Mask scheduling', open=False): + gr.HTML(value=get_gradio_html('composable_masks')) + with FormRow(): + mask_schedule = create_gr_elem(da.mask_schedule) + with FormRow(): + use_noise_mask = create_gr_elem(da.use_noise_mask) + with FormRow(): + noise_mask_schedule = create_gr_elem(da.noise_mask_schedule) + + return {k: v for k, v in {**locals(), **vars()}.items()} + +def get_tab_init(d, da, dp): + with gr.TabItem('Init'): + # IMAGE INIT INNER-TAB + with gr.Tab('Image Init'): + with FormRow(): + with gr.Column(min_width=150): + use_init = create_gr_elem(d.use_init) + with gr.Column(min_width=150): + strength_0_no_init = create_gr_elem(d.strength_0_no_init) + with gr.Column(min_width=170): + strength = create_gr_elem(d.strength) + with FormRow(): + init_image = create_gr_elem(d.init_image) + with FormRow(): + init_image_box = create_gr_elem(d.init_image_box) + # VIDEO INIT INNER-TAB + with gr.Tab('Video Init'): + with FormRow(): + video_init_path = create_gr_elem(da.video_init_path) + with FormRow(): + extract_from_frame = create_gr_elem(da.extract_from_frame) + extract_to_frame = create_gr_elem(da.extract_to_frame) + extract_nth_frame = create_gr_elem(da.extract_nth_frame) + overwrite_extracted_frames = create_gr_elem(da.overwrite_extracted_frames) + use_mask_video = create_gr_elem(da.use_mask_video) + with FormRow(): + video_mask_path = create_gr_elem(da.video_mask_path) + # MASK INIT INNER-TAB + with gr.Tab('Mask Init'): + with FormRow(): + use_mask = create_gr_elem(d.use_mask) + use_alpha_as_mask = create_gr_elem(d.use_alpha_as_mask) + invert_mask = create_gr_elem(d.invert_mask) + overlay_mask = create_gr_elem(d.overlay_mask) + with FormRow(): + mask_file = create_gr_elem(d.mask_file) + with FormRow(): + mask_overlay_blur = create_gr_elem(d.mask_overlay_blur) + with FormRow(): + fill = create_gr_elem(d.fill) + with FormRow(): + full_res_mask = create_gr_elem(d.full_res_mask) + full_res_mask_padding = create_gr_elem(d.full_res_mask_padding) + with FormRow(): + with FormColumn(min_width=240): + mask_contrast_adjust = create_gr_elem(d.mask_contrast_adjust) + with FormColumn(min_width=250): + mask_brightness_adjust = create_gr_elem(d.mask_brightness_adjust) + # PARSEQ ACCORD + with gr.Accordion('Parseq', open=False): + gr.HTML(value=get_gradio_html('parseq')) + with FormRow(): + parseq_manifest = create_gr_elem(dp.parseq_manifest) + with FormRow(): + parseq_use_deltas = create_gr_elem(dp.parseq_use_deltas) + return {k: v for k, v in {**locals(), **vars()}.items()} + +def get_tab_hybrid(da): + with gr.TabItem('Hybrid Video'): + # this html only shows when not in 2d/3d mode + hybrid_msg_html = gr.HTML(value='Change animation mode to 2D or 3D to enable Hybrid Mode', visible=False, elem_id='hybrid_msg_html') + # HYBRID INFO ACCORD + with gr.Accordion("Info & Help", open=False): + gr.HTML(value=get_gradio_html('hybrid_video')) + # HYBRID SETTINGS ACCORD + with gr.Accordion("Hybrid Settings", open=True) as hybrid_settings_accord: + with FormRow(): + hybrid_composite = gr.Radio(['None', 
'Normal', 'Before Motion', 'After Generation'], label="Hybrid composite", value=da.hybrid_composite, elem_id="hybrid_composite") + with FormRow(): + with FormColumn(min_width=340): + with FormRow(): + hybrid_generate_inputframes = create_gr_elem(da.hybrid_generate_inputframes) + hybrid_use_first_frame_as_init_image = create_gr_elem(da.hybrid_use_first_frame_as_init_image) + hybrid_use_init_image = create_gr_elem(da.hybrid_use_init_image) + with FormRow(): + with FormColumn(): + with FormRow(): + hybrid_motion = create_gr_elem(da.hybrid_motion) + with FormColumn(): + with FormRow(): + with FormColumn(scale=1): + hybrid_flow_method = create_gr_elem(da.hybrid_flow_method) + with FormRow(): + with FormColumn(): + hybrid_flow_consistency = create_gr_elem(da.hybrid_flow_consistency) + hybrid_consistency_blur = create_gr_elem(da.hybrid_consistency_blur) + with FormColumn(): + hybrid_motion_use_prev_img = create_gr_elem(da.hybrid_motion_use_prev_img) + with FormRow(): + hybrid_comp_mask_type = create_gr_elem(da.hybrid_comp_mask_type) + with gr.Row(visible=False, variant='compact') as hybrid_comp_mask_row: + hybrid_comp_mask_equalize = create_gr_elem(da.hybrid_comp_mask_equalize) + with FormColumn(): + hybrid_comp_mask_auto_contrast = gr.Checkbox(label="Comp mask auto contrast", value=False, interactive=True) + hybrid_comp_mask_inverse = gr.Checkbox(label="Comp mask inverse", value=da.hybrid_comp_mask_inverse, interactive=True) + with FormRow(): + hybrid_comp_save_extra_frames = gr.Checkbox(label="Comp save extra frames", value=False, interactive=True) + # HYBRID SCHEDULES ACCORD + with gr.Accordion("Hybrid Schedules", open=False, visible=False) as hybrid_sch_accord: + with FormRow() as hybrid_comp_alpha_schedule_row: + hybrid_comp_alpha_schedule = create_gr_elem(da.hybrid_comp_alpha_schedule) + with FormRow() as hybrid_flow_factor_schedule_row: + hybrid_flow_factor_schedule = create_gr_elem(da.hybrid_flow_factor_schedule) + with FormRow(visible=False) as hybrid_comp_mask_blend_alpha_schedule_row: + hybrid_comp_mask_blend_alpha_schedule = create_gr_elem(da.hybrid_comp_mask_blend_alpha_schedule) + with FormRow(visible=False) as hybrid_comp_mask_contrast_schedule_row: + hybrid_comp_mask_contrast_schedule = create_gr_elem(da.hybrid_comp_mask_contrast_schedule) + with FormRow(visible=False) as hybrid_comp_mask_auto_contrast_cutoff_high_schedule_row: + hybrid_comp_mask_auto_contrast_cutoff_high_schedule = create_gr_elem(da.hybrid_comp_mask_auto_contrast_cutoff_high_schedule) + with FormRow(visible=False) as hybrid_comp_mask_auto_contrast_cutoff_low_schedule_row: + hybrid_comp_mask_auto_contrast_cutoff_low_schedule = create_gr_elem(da.hybrid_comp_mask_auto_contrast_cutoff_low_schedule) + # HUMANS MASKING ACCORD + with gr.Accordion("Humans Masking", open=False, visible=False) as humans_masking_accord: + with FormRow(): + hybrid_generate_human_masks = create_gr_elem(da.hybrid_generate_human_masks) + + return {k: v for k, v in {**locals(), **vars()}.items()} + +def get_tab_output(da, dv): + with gr.TabItem('Output', elem_id='output_tab'): + # VID OUTPUT ACCORD + with gr.Accordion('Video Output Settings', open=True): + with FormRow() as fps_out_format_row: + fps = create_gr_elem(dv.fps) + with FormColumn(): + with FormRow() as soundtrack_row: + add_soundtrack = create_gr_elem(dv.add_soundtrack) + soundtrack_path = create_gr_elem(dv.soundtrack_path) + with FormRow(): + skip_video_creation = create_gr_elem(dv.skip_video_creation) + delete_imgs = create_gr_elem(dv.delete_imgs) + delete_input_frames = 
create_gr_elem(dv.delete_input_frames) + store_frames_in_ram = create_gr_elem(dv.store_frames_in_ram) + save_depth_maps = create_gr_elem(da.save_depth_maps) + make_gif = create_gr_elem(dv.make_gif) + with FormRow(equal_height=True) as r_upscale_row: + r_upscale_video = create_gr_elem(dv.r_upscale_video) + r_upscale_model = create_gr_elem(dv.r_upscale_model) + r_upscale_factor = create_gr_elem(dv.r_upscale_factor) + r_upscale_keep_imgs = create_gr_elem(dv.r_upscale_keep_imgs) + # FRAME INTERPOLATION TAB + with gr.Tab('Frame Interpolation') as frame_interp_tab: + with gr.Accordion('Important notes and Help', open=False, elem_id="f_interp_accord"): + gr.HTML(value=get_gradio_html('frame_interpolation')) + with gr.Column(): + with gr.Row(): + # Interpolation Engine + with gr.Column(min_width=110, scale=3): + frame_interpolation_engine = create_gr_elem(dv.frame_interpolation_engine) + with gr.Column(min_width=30, scale=1): + frame_interpolation_slow_mo_enabled = create_gr_elem(dv.frame_interpolation_slow_mo_enabled) + with gr.Column(min_width=30, scale=1): + # If this is set to True, we keep all the interpolated frames in a folder. Default is False - means we delete them at the end of the run + frame_interpolation_keep_imgs = create_gr_elem(dv.frame_interpolation_keep_imgs) + with gr.Column(min_width=30, scale=1): + frame_interpolation_use_upscaled = create_gr_elem(dv.frame_interpolation_use_upscaled) + with FormRow(visible=False) as frame_interp_amounts_row: + with gr.Column(min_width=180) as frame_interp_x_amount_column: + # How many times to interpolate (interp X) + frame_interpolation_x_amount = create_gr_elem(dv.frame_interpolation_x_amount) + with gr.Column(min_width=180, visible=False) as frame_interp_slow_mo_amount_column: + # Interp Slow-Mo (setting final output fps, not really doing anything directly with RIFE/FILM) + frame_interpolation_slow_mo_amount = create_gr_elem(dv.frame_interpolation_slow_mo_amount) + with gr.Row(visible=False) as interp_existing_video_row: + # Interpolate any existing video from the connected PC + with gr.Accordion('Interpolate existing Video/ Images', open=False) as interp_existing_video_accord: + with gr.Row(variant='compact') as interpolate_upload_files_row: + # A drag-n-drop UI box to which the user uploads a *single* (at this stage) video + vid_to_interpolate_chosen_file = gr.File(label="Video to Interpolate", interactive=True, file_count="single", file_types=["video"], + elem_id="vid_to_interpolate_chosen_file") + # A drag-n-drop UI box to which the user uploads a pictures to interpolate + pics_to_interpolate_chosen_file = gr.File(label="Pics to Interpolate", interactive=True, file_count="multiple", file_types=["image"], + elem_id="pics_to_interpolate_chosen_file") + with FormRow(visible=False) as interp_live_stats_row: + # Non-interactive textbox showing uploaded input vid total Frame Count + in_vid_frame_count_window = gr.Textbox(label="In Frame Count", lines=1, interactive=False, value='---') + # Non-interactive textbox showing uploaded input vid FPS + in_vid_fps_ui_window = gr.Textbox(label="In FPS", lines=1, interactive=False, value='---') + # Non-interactive textbox showing expected output interpolated video FPS + out_interp_vid_estimated_fps = gr.Textbox(label="Interpolated Vid FPS", value='---') + with FormRow() as interp_buttons_row: + # This is the actual button that's pressed to initiate the interpolation: + interpolate_button = gr.Button(value="*Interpolate Video*") + interpolate_pics_button = gr.Button(value="*Interpolate Pics*") + # Show a 
text about CLI outputs: + gr.HTML("* check your CLI for outputs *", elem_id="below_interpolate_butts_msg") + # make the function call when the interpolation button is clicked + interpolate_button.click(fn=upload_vid_to_interpolate, + inputs=[vid_to_interpolate_chosen_file, frame_interpolation_engine, frame_interpolation_x_amount, frame_interpolation_slow_mo_enabled, + frame_interpolation_slow_mo_amount, frame_interpolation_keep_imgs, in_vid_fps_ui_window]) + interpolate_pics_button.click(fn=upload_pics_to_interpolate, + inputs=[pics_to_interpolate_chosen_file, frame_interpolation_engine, frame_interpolation_x_amount, frame_interpolation_slow_mo_enabled, + frame_interpolation_slow_mo_amount, frame_interpolation_keep_imgs, fps, add_soundtrack, soundtrack_path]) + # VIDEO UPSCALE TAB - not built using our args.py at all - all data and params are here and in .upscaling file + with gr.TabItem('Video Upscaling'): + vid_to_upscale_chosen_file = gr.File(label="Video to Upscale", interactive=True, file_count="single", file_types=["video"], elem_id="vid_to_upscale_chosen_file") + with gr.Column(): + # NCNN UPSCALE TAB + with FormRow() as ncnn_upload_vid_stats_row: + ncnn_upscale_in_vid_frame_count_window = gr.Textbox(label="In Frame Count", lines=1, interactive=False, + value='---') # Non-interactive textbox showing uploaded input vid Frame Count + ncnn_upscale_in_vid_fps_ui_window = gr.Textbox(label="In FPS", lines=1, interactive=False, value='---') # Non-interactive textbox showing uploaded input vid FPS + ncnn_upscale_in_vid_res = gr.Textbox(label="In Res", lines=1, interactive=False, value='---') # Non-interactive textbox showing uploaded input resolution + ncnn_upscale_out_vid_res = gr.Textbox(label="Out Res", value='---') # Non-interactive textbox showing expected output resolution + with gr.Column(): + with FormRow() as ncnn_actual_upscale_row: + ncnn_upscale_model = create_gr_elem(dv.r_upscale_model) # note that we re-use *r_upscale_model* in here to create the gradio element as they are the same + ncnn_upscale_factor = create_gr_elem(dv.r_upscale_factor) # note that we re-use *r_upscale_facto*r in here to create the gradio element as they are the same + ncnn_upscale_keep_imgs = create_gr_elem(dv.r_upscale_keep_imgs) # note that we re-use *r_upscale_keep_imgs* in here to create the gradio element as they are the same + ncnn_upscale_btn = gr.Button(value="*Upscale uploaded video*") + ncnn_upscale_btn.click(fn=ncnn_upload_vid_to_upscale, + inputs=[vid_to_upscale_chosen_file, ncnn_upscale_in_vid_fps_ui_window, ncnn_upscale_in_vid_res, ncnn_upscale_out_vid_res, ncnn_upscale_model, + ncnn_upscale_factor, ncnn_upscale_keep_imgs]) + # Vid2Depth TAB - not built using our args.py at all - all data and params are here and in .vid2depth file + with gr.TabItem('Vid2depth'): + vid_to_depth_chosen_file = gr.File(label="Video to get Depth from", interactive=True, file_count="single", file_types=["video"], elem_id="vid_to_depth_chosen_file") + with FormRow(): + mode = gr.Dropdown(label='Mode', elem_id="mode", choices=['Depth (Midas/Adabins)', 'Anime Remove Background', 'Mixed', 'None (just grayscale)'], value='Depth (Midas/Adabins)') + threshold_value = gr.Slider(label="Threshold Value Lower", value=127, minimum=0, maximum=255, step=1) + threshold_value_max = gr.Slider(label="Threshold Value Upper", value=255, minimum=0, maximum=255, step=1) + with FormRow(): + thresholding = gr.Radio(['None', 'Simple', 'Simple (Auto-value)', 'Adaptive (Mean)', 'Adaptive (Gaussian)'], label="Thresholding Mode", 
value='None') + with FormRow(): + adapt_block_size = gr.Number(label="Block size", value=11) + adapt_c = gr.Number(label="C", value=2) + invert = gr.Checkbox(label='Closer is brighter', value=True, elem_id="invert") + with FormRow(): + end_blur = gr.Slider(label="End blur width", value=0, minimum=0, maximum=255, step=1) + midas_weight_vid2depth = gr.Slider(label="MiDaS weight (vid2depth)", value=da.midas_weight, minimum=0, maximum=1, step=0.05, interactive=True, + info="sets a midpoint at which a depth-map is to be drawn: range [-1 to +1]") + depth_keep_imgs = gr.Checkbox(label='Keep Imgs', value=True, elem_id="depth_keep_imgs") + with FormRow(): + # This is the actual button that's pressed to initiate the Upscaling: + depth_btn = gr.Button(value="*Get depth from uploaded video*") + with FormRow(): + # Show a text about CLI outputs: + gr.HTML("* check your CLI for outputs") + # make the function call when the UPSCALE button is clicked + depth_btn.click(fn=upload_vid_to_depth, + inputs=[vid_to_depth_chosen_file, mode, thresholding, threshold_value, threshold_value_max, adapt_block_size, adapt_c, invert, end_blur, midas_weight_vid2depth, depth_keep_imgs]) + # STITCH FRAMES TO VID TAB + with gr.TabItem('Frames to Video') as stitch_imgs_to_vid_row: + gr.HTML(value=get_gradio_html('frames_to_video')) + with FormRow(): + image_path = create_gr_elem(dv.image_path) + ffmpeg_stitch_imgs_but = gr.Button(value="*Stitch frames to video*") + ffmpeg_stitch_imgs_but.click(fn=direct_stitch_vid_from_frames, inputs=[image_path, fps, add_soundtrack, soundtrack_path]) + + return {k: v for k, v in {**locals(), **vars()}.items()} \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/ui_left.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/ui_left.py new file mode 100644 index 0000000000000000000000000000000000000000..aa18863725affd3e375b8a15d2e181ed1be55336 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/ui_left.py @@ -0,0 +1,61 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
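Every get_tab_* function above ends with return {k: v for k, v in {**locals(), **vars()}.items()}, i.e. it hands all of its locally created gradio components back to the caller keyed by variable name. A stripped-down sketch of the same pattern (the tab and component names here are invented for illustration):

import gradio as gr

def get_tab_example():
    with gr.TabItem('Example'):
        steps = gr.Slider(label="Steps", minimum=1, maximum=150, value=25)
        seed = gr.Number(label="Seed", value=-1)
    # hand every local back to the caller, keyed by its variable name
    return {k: v for k, v in locals().items()}

with gr.Blocks() as demo:
    with gr.Tabs():
        components = get_tab_example()

print(sorted(components.keys()))  # ['seed', 'steps']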
+ +# Contact the authors: https://deforum.github.io/ + +from types import SimpleNamespace +import gradio as gr +from .defaults import get_gradio_html +from .gradio_funcs import change_css, handle_change_functions +from .args import DeforumArgs, DeforumAnimArgs, ParseqArgs, DeforumOutputArgs, RootArgs, LoopArgs +from .deforum_controlnet import setup_controlnet_ui +from .ui_elements import get_tab_run, get_tab_keyframes, get_tab_prompts, get_tab_init, get_tab_hybrid, get_tab_output + +def set_arg_lists(): + # convert dicts to NameSpaces for easy working (args.param instead of args['param'] + d = SimpleNamespace(**DeforumArgs()) # default args + da = SimpleNamespace(**DeforumAnimArgs()) # default anim args + dp = SimpleNamespace(**ParseqArgs()) # default parseq ars + dv = SimpleNamespace(**DeforumOutputArgs()) # default video args + dr = SimpleNamespace(**RootArgs()) # ROOT args + dloopArgs = SimpleNamespace(**LoopArgs()) # Guided imgs args + return d, da, dp, dv, dr, dloopArgs + +def setup_deforum_left_side_ui(): + d, da, dp, dv, dr, dloopArgs = set_arg_lists() + # set up main info accordion on top of the UI + with gr.Accordion("Info, Links and Help", open=False, elem_id='main_top_info_accord'): + gr.HTML(value=get_gradio_html('main')) + # show button to hide/ show gradio's info texts for each element in the UI + with gr.Row(variant='compact'): + show_info_on_ui = gr.Checkbox(label="Show more info", value=d.show_info_on_ui, interactive=True) + with gr.Blocks(): + with gr.Tabs(): + # Get main tab contents: + tab_run_params = get_tab_run(d, da) # Run tab + tab_keyframes_params = get_tab_keyframes(d, da, dloopArgs) # Keyframes tab + tab_prompts_params = get_tab_prompts(da) # Prompts tab + tab_init_params = get_tab_init(d, da, dp) # Init tab + controlnet_dict = setup_controlnet_ui() # ControlNet tab + tab_hybrid_params = get_tab_hybrid(da) # Hybrid tab + tab_output_params = get_tab_output(da, dv) # Output tab + # add returned gradio elements from main tabs to locals() + for key, value in {**tab_run_params, **tab_keyframes_params, **tab_prompts_params, **tab_init_params, **controlnet_dict, **tab_hybrid_params, **tab_output_params}.items(): + locals()[key] = value + + # Gradio's Change functions - hiding and renaming elements based on other elements + show_info_on_ui.change(fn=change_css, inputs=show_info_on_ui, outputs=gr.outputs.HTML()) + handle_change_functions(locals()) + + return locals() diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/ui_right.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/ui_right.py new file mode 100644 index 0000000000000000000000000000000000000000..e5b77134e05627ea9abc3b0a04eed15508eec28b --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/ui_right.py @@ -0,0 +1,150 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
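The SimpleNamespace conversion in set_arg_lists above is plain dict-to-attribute sugar; a minimal illustration (the keys are hypothetical stand-ins, not the real DeforumArgs contents):

from types import SimpleNamespace

defaults = {"W": 512, "H": 512, "seed": -1}   # hypothetical stand-in for one of the *Args() dicts
d = SimpleNamespace(**defaults)
print(d.W, d.seed)   # 512 -1  (attribute access instead of d["W"])
d.seed = 42          # still mutable like a normal object
print(vars(d))       # {'W': 512, 'H': 512, 'seed': 42}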
+ +# Contact the authors: https://deforum.github.io/ + +from .args import DeforumOutputArgs, get_component_names, get_settings_component_names +from modules.shared import opts, state +from modules.ui import create_output_panel, wrap_gradio_call +from modules.call_queue import wrap_gradio_gpu_call +from .run_deforum import run_deforum +from .settings import save_settings, load_all_settings, load_video_settings +from .general_utils import get_deforum_version +from .ui_left import setup_deforum_left_side_ui +from scripts.deforum_extend_paths import deforum_sys_extend +import gradio as gr + +def on_ui_tabs(): + # extend paths using sys.path.extend so we can access all of our files and folders + deforum_sys_extend() + # set text above generate button + i1_store_backup = f"

Deforum extension for auto1111 — version 3.0 | Git commit: {get_deforum_version()}
" + i1_store = i1_store_backup + + with gr.Blocks(analytics_enabled=False) as deforum_interface: + components = {} + dummy_component = gr.Label(visible=False) + with gr.Row(elem_id='deforum_progress_row').style(equal_height=False, variant='compact'): + with gr.Column(scale=1, variant='panel'): + # setting the left side of the ui: + components = setup_deforum_left_side_ui() + with gr.Column(scale=1, variant='compact'): + with gr.Row(variant='compact'): + btn = gr.Button("Click here after the generation to show the video") + components['btn'] = btn + close_btn = gr.Button("Close the video", visible=False) + with gr.Row(variant='compact'): + i1 = gr.HTML(i1_store, elem_id='deforum_header') + components['i1'] = i1 + def show_vid(): # Show video button related func + from .run_deforum import last_vid_data # get latest vid preview data (this import needs to stay inside the function!) + return { + i1: gr.update(value=last_vid_data, visible=True), + close_btn: gr.update(visible=True), + btn: gr.update(value="Update the video", visible=True), + } + btn.click( + fn=show_vid, + inputs=[], + outputs=[i1, close_btn, btn], + ) + def close_vid(): # Close video button related func + return { + i1: gr.update(value=i1_store_backup, visible=True), + close_btn: gr.update(visible=False), + btn: gr.update(value="Click here after the generation to show the video", visible=True), + } + + close_btn.click( + fn=close_vid, + inputs=[], + outputs=[i1, close_btn, btn], + ) + id_part = 'deforum' + with gr.Row(elem_id=f"{id_part}_generate_box", variant='compact'): + skip = gr.Button('Pause/Resume', elem_id=f"{id_part}_skip", visible=False) + interrupt = gr.Button('Interrupt', elem_id=f"{id_part}_interrupt", visible=True) + submit = gr.Button('Generate', elem_id=f"{id_part}_generate", variant='primary') + + skip.click( + fn=lambda: state.skip(), + inputs=[], + outputs=[], + ) + + interrupt.click( + fn=lambda: state.interrupt(), + inputs=[], + outputs=[], + ) + + deforum_gallery, generation_info, html_info, _ = create_output_panel("deforum", opts.outdir_img2img_samples) + + with gr.Row(variant='compact'): + settings_path = gr.Textbox("deforum_settings.txt", elem_id='deforum_settings_path', label="Settings File", info="settings file path can be relative to webui folder OR full - absolute") + with gr.Row(variant='compact'): + save_settings_btn = gr.Button('Save Settings', elem_id='deforum_save_settings_btn') + load_settings_btn = gr.Button('Load All Settings', elem_id='deforum_load_settings_btn') + load_video_settings_btn = gr.Button('Load Video Settings', elem_id='deforum_load_video_settings_btn') + + component_list = [components[name] for name in get_component_names()] + + submit.click( + fn=wrap_gradio_gpu_call(run_deforum), + _js="submit_deforum", + inputs=[dummy_component, dummy_component] + component_list, + outputs=[ + deforum_gallery, + components["resume_timestring"], + generation_info, + html_info + ], + ) + + settings_component_list = [components[name] for name in get_settings_component_names()] + video_settings_component_list = [components[name] for name in list(DeforumOutputArgs().keys())] + + save_settings_btn.click( + fn=wrap_gradio_call(save_settings), + inputs=[settings_path] + settings_component_list + video_settings_component_list, + outputs=[], + ) + + load_settings_btn.click( + fn=wrap_gradio_call(lambda *args, **kwargs: load_all_settings(*args, ui_launch=False, **kwargs)), + inputs=[settings_path] + settings_component_list, + outputs=settings_component_list, + ) + + load_video_settings_btn.click( + 
fn=wrap_gradio_call(load_video_settings), + inputs=[settings_path] + video_settings_component_list, + outputs=video_settings_component_list, + ) + + # handle persistent settings - load the persistent file upon UI launch + def trigger_load_general_settings(): + print("Loading general settings...") + wrapped_fn = wrap_gradio_call(lambda *args, **kwargs: load_all_settings(*args, ui_launch=True, **kwargs)) + inputs = [settings_path.value] + [component.value for component in settings_component_list] + outputs = settings_component_list + updated_values = wrapped_fn(*inputs, *outputs)[0] + settings_component_name_to_obj = {name: component for name, component in zip(get_settings_component_names(), settings_component_list)} + for key, value in updated_values.items(): + settings_component_name_to_obj[key].value = value['value'] + # actually check persistent setting status + if opts.data.get("deforum_enable_persistent_settings", False): + trigger_load_general_settings() + + return [(deforum_interface, "Deforum", "deforum_interface")] \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/ui_settings.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/ui_settings.py new file mode 100644 index 0000000000000000000000000000000000000000..ec0fd91a8f5178c914002de29405888a1a6768af --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/ui_settings.py @@ -0,0 +1,37 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
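# The show/close video buttons wired up above follow a common Gradio toggle pattern: a click
# handler returns a dict of gr.update(...) objects keyed by the output components it wants to
# change. A minimal, self-contained sketch of that pattern (component names, labels and the
# video path here are placeholders, not the extension's real ones):

import gradio as gr

with gr.Blocks() as demo:
    preview = gr.HTML("<p>nothing to show yet</p>")
    show_btn = gr.Button("Show the video")
    close_btn = gr.Button("Close the video", visible=False)

    def show():
        # reveal the preview and the close button, relabel the show button
        return {
            preview: gr.update(value="<video src='output.mp4' controls></video>"),
            close_btn: gr.update(visible=True),
            show_btn: gr.update(value="Update the video"),
        }

    def close():
        # restore the initial state
        return {
            preview: gr.update(value="<p>nothing to show yet</p>"),
            close_btn: gr.update(visible=False),
            show_btn: gr.update(value="Show the video"),
        }

    show_btn.click(fn=show, inputs=[], outputs=[preview, close_btn, show_btn])
    close_btn.click(fn=close, inputs=[], outputs=[preview, close_btn, show_btn])

# demo.launch()  # uncomment to try the sketch standalone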
+ +# Contact the authors: https://deforum.github.io/ + +import gradio as gr +from modules import ui_components +from modules.shared import opts, cmd_opts, OptionInfo +from .video_audio_utilities import find_ffmpeg_binary +from .subtitle_handler import get_user_values + +def on_ui_settings(): + srt_ui_params = get_user_values() + section = ('deforum', "Deforum") + opts.add_option("deforum_keep_3d_models_in_vram", OptionInfo(False, "Keep 3D models in VRAM between runs", gr.Checkbox, {"interactive": True, "visible": True if not (cmd_opts.lowvram or cmd_opts.medvram) else False}, section=section)) + opts.add_option("deforum_enable_persistent_settings", OptionInfo(False, "Keep settings persistent upon relaunch of webui", gr.Checkbox, {"interactive": True}, section=section)) + opts.add_option("deforum_persistent_settings_path", OptionInfo("models/Deforum/deforum_persistent_settings.txt", "Path for saving your persistent settings file:", section=section)) + opts.add_option("deforum_ffmpeg_location", OptionInfo(find_ffmpeg_binary(), "FFmpeg path/ location", section=section)) + opts.add_option("deforum_ffmpeg_crf", OptionInfo(17, "FFmpeg CRF value", gr.Slider, {"interactive": True, "minimum": 0, "maximum": 51}, section=section)) + opts.add_option("deforum_ffmpeg_preset", OptionInfo('slow', "FFmpeg Preset", gr.Dropdown, {"interactive": True, "choices": ['veryslow', 'slower', 'slow', 'medium', 'fast', 'faster', 'veryfast', 'superfast', 'ultrafast']}, section=section)) + opts.add_option("deforum_debug_mode_enabled", OptionInfo(False, "Enable Dev mode - adds extra reporting in console", gr.Checkbox, {"interactive": True}, section=section)) + opts.add_option("deforum_save_gen_info_as_srt", OptionInfo(False, "Save an .srt (subtitles) file with the generation info along with each animation", gr.Checkbox, {"interactive": True}, section=section)) + opts.add_option("deforum_embed_srt", OptionInfo(False, "If .srt file is saved, soft-embed the subtitles into the rendered video file", gr.Checkbox, {"interactive": True}, section=section)) + opts.add_option("deforum_save_gen_info_as_srt_params", OptionInfo(['Noise Schedule'], "Choose which animation params are to be saved to the .srt file (Frame # and Seed will always be saved):", ui_components.DropdownMulti, lambda: {"interactive": True, "choices": srt_ui_params}, section=section)) + opts.add_option("deforum_preview", OptionInfo("Off", "Generate preview video during generation? (Preview does not include frame interpolation or upscaling.)", gr.Dropdown, {"interactive": True, "choices": ['Off', 'On', 'On, concurrent (don\'t pause generation)']}, section=section)) + opts.add_option("deforum_preview_interval_frames", OptionInfo(100, "Generate preview every N frames", gr.Slider, {"interactive": True, "minimum": 10, "maximum": 500}, section=section)) diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/upscaling.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/upscaling.py new file mode 100644 index 0000000000000000000000000000000000000000..f255cecf75051a435407391723e7794078fd3595 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/upscaling.py @@ -0,0 +1,173 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. 
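# The options registered in on_ui_settings above are read back elsewhere with
# opts.data.get(key, default); keeping the fallback equal to the registered default avoids
# surprises when the user never opened the settings tab. A condensed sketch of that round
# trip inside the webui environment (the reader function name is illustrative):

import gradio as gr
from modules.shared import opts, OptionInfo

def register_crf_option():
    section = ('deforum', "Deforum")
    opts.add_option("deforum_ffmpeg_crf",
                    OptionInfo(17, "FFmpeg CRF value", gr.Slider,
                               {"interactive": True, "minimum": 0, "maximum": 51},
                               section=section))

def get_crf():
    # consumer side, called at render time; default mirrors the registration above
    return opts.data.get("deforum_ffmpeg_crf", 17)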
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +# Contact the authors: https://deforum.github.io/ + +import os +from pathlib import Path +import shutil +import time +import subprocess +from .frame_interpolation import clean_folder_name +from .general_utils import duplicate_pngs_from_folder, checksum +from .video_audio_utilities import vid2frames, ffmpeg_stitch_video, extract_number, media_file_has_audio +from basicsr.utils.download_util import load_file_from_url +from .rich import console + +from modules.shared import opts + +# NCNN Upscale section START +def process_ncnn_upscale_vid_upload_logic(vid_path, in_vid_fps, in_vid_res, out_vid_res, models_path, upscale_model, upscale_factor, keep_imgs, f_location, f_crf, f_preset, current_user_os): + print(f"Got a request to *upscale* a video using {upscale_model} at {upscale_factor}") + + folder_name = clean_folder_name(Path(vid_path.orig_name).stem) + outdir = opts.outdir_samples or os.path.join(os.getcwd(), 'outputs') + outdir_no_tmp = outdir + f'/frame-upscaling/{folder_name}' + i = 1 + while os.path.exists(outdir_no_tmp): + outdir_no_tmp = f"{outdir}/frame-upscaling/{folder_name}_{i}" + i += 1 + + outdir = os.path.join(outdir_no_tmp, 'tmp_input_frames') + os.makedirs(outdir, exist_ok=True) + + vid2frames(video_path=vid_path.name, video_in_frame_path=outdir, overwrite=True, extract_from_frame=0, extract_to_frame=-1, numeric_files_output=True, out_img_format='png') + + process_ncnn_video_upscaling(vid_path, outdir, in_vid_fps, in_vid_res, out_vid_res, models_path, upscale_model, upscale_factor, keep_imgs, f_location, f_crf, f_preset, current_user_os) + +def process_ncnn_video_upscaling(vid_path, outdir, in_vid_fps, in_vid_res, out_vid_res, models_path, upscale_model, upscale_factor, keep_imgs, f_location, f_crf, f_preset, current_user_os): + # get clean number from 'x2, x3' etc + clean_num_r_up_factor = extract_number(upscale_factor) + # set paths + realesrgan_ncnn_location = os.path.join(models_path, 'realesrgan_ncnn', 'realesrgan-ncnn-vulkan' + ('.exe' if current_user_os == 'Windows' else '')) + upscaled_folder_path = os.path.join(os.path.dirname(outdir), 'Upscaled_frames') + # create folder for upscaled imgs to live in. this folder will stay alive if keep_imgs=True, otherwise get deleted at the end + os.makedirs(upscaled_folder_path, exist_ok=True) + # originally we used vid_path.orig_name but gradio broke it in v 3.23 so we use a hack on vid_path.name, which might not hold forever. 2023-04-05 + out_upscaled_mp4_path = os.path.join(os.path.dirname(outdir), f"{os.path.basename(vid_path.name)}_Upscaled_{upscale_factor}.mp4") + # download upscaling model if needed + check_and_download_realesrgan_ncnn(models_path, current_user_os) + # set cmd command + cmd = [realesrgan_ncnn_location, '-i', outdir, '-o', upscaled_folder_path, '-s', str(clean_num_r_up_factor), '-n', upscale_model] + # msg to print - need it to hide that text later on (!) + msg_to_print = f"Upscaling raw PNGs using {upscale_model} at {upscale_factor}..." 
+ # blink the msg in the cli until action is done + console.print(msg_to_print, style="blink yellow", end="") + start_time = time.time() + # make call to ncnn upscaling executble + process = subprocess.run(cmd, capture_output=True, check=True, text=True) + print("\r" + " " * len(msg_to_print), end="", flush=True) + print(f"\r{msg_to_print}", flush=True) + print(f"\rUpscaling \033[0;32mdone\033[0m in {time.time() - start_time:.2f} seconds!", flush=True) + # set custom path for ffmpeg func below + upscaled_imgs_path_for_ffmpeg = os.path.join(upscaled_folder_path, "%09d.png") + add_soundtrack = 'None' + # don't pass add_soundtrack to ffmpeg if orig video doesn't contain any audio, so we won't get a message saying audio couldn't be added :) + if media_file_has_audio(vid_path.name, f_location): + add_soundtrack = 'File' + # stitch video from upscaled pngs + ffmpeg_stitch_video(ffmpeg_location=f_location, fps=in_vid_fps, outmp4_path=out_upscaled_mp4_path, stitch_from_frame=0, stitch_to_frame=-1, imgs_path=upscaled_imgs_path_for_ffmpeg, add_soundtrack=add_soundtrack, audio_path=vid_path.name, crf=f_crf, preset=f_preset) + # delete the raw video pngs + shutil.rmtree(outdir) + # delete upscaled imgs if user requested + if not keep_imgs: + shutil.rmtree(upscaled_folder_path) + +def check_and_download_realesrgan_ncnn(models_folder, current_user_os): + import zipfile + if current_user_os == 'Windows': + zip_file_name = 'realesrgan-ncnn-windows.zip' + executble_name = 'realesrgan-ncnn-vulkan.exe' + zip_checksum_value = '1d073f520a4a3f6438a500fea88407964da6d4a87489719bedfa7445b76c019fdd95a5c39576ca190d7ac22c906b33d5250a6f48cb7eda2b6af3e86ec5f09dfc' + download_url = 'https://github.com/hithereai/Real-ESRGAN/releases/download/real-esrgan-ncnn-windows/realesrgan-ncnn-windows.zip' + elif current_user_os == 'Linux': + zip_file_name = 'realesrgan-ncnn-linux.zip' + executble_name = 'realesrgan-ncnn-vulkan' + zip_checksum_value = 'df44c4e9a1ff66331079795f018a67fbad8ce37c4472929a56b5a38440cf96982d6e164a086b438c3d26d269025290dd6498bd50846bda8691521ecf8f0fafdf' + download_url = 'https://github.com/hithereai/Real-ESRGAN/releases/download/real-esrgan-ncnn-linux/realesrgan-ncnn-linux.zip' + elif current_user_os == 'Mac': + zip_file_name = 'realesrgan-ncnn-mac.zip' + executble_name = 'realesrgan-ncnn-vulkan' + zip_checksum_value = '65f09472025b55b18cf6ba64149ede8cded90c20e18d35a9edb1ab60715b383a6ffbf1be90d973fc2075cf99d4cc1411fbdc459411af5c904f544b8656111469' + download_url = 'https://github.com/hithereai/Real-ESRGAN/releases/download/real-esrgan-ncnn-mac/realesrgan-ncnn-mac.zip' + else: # who are you then? + raise Exception(f"No support for OS type: {current_user_os}") + + # set paths + realesrgan_ncnn_folder = os.path.join(models_folder, 'realesrgan_ncnn') + realesrgan_exec_path = os.path.join(realesrgan_ncnn_folder, executble_name) + realesrgan_zip_path = os.path.join(realesrgan_ncnn_folder, zip_file_name) + # return if exec file already exist + if os.path.exists(realesrgan_exec_path): + return + try: + os.makedirs(realesrgan_ncnn_folder, exist_ok=True) + # download exec and model files from url + load_file_from_url(download_url, realesrgan_ncnn_folder) + # check downloaded zip's hash + with open(realesrgan_zip_path, 'rb') as f: + file_hash = checksum(realesrgan_zip_path) + # wrong hash, file is probably broken/ download interrupted + if file_hash != zip_checksum_value: + raise Exception(f"Error while downloading {realesrgan_zip_path}. 
Please download from: {download_url}, and extract its contents into: {models_folder}/realesrgan_ncnn") + # hash ok, extract zip contents into our folder + with zipfile.ZipFile(realesrgan_zip_path, 'r') as zip_ref: + zip_ref.extractall(realesrgan_ncnn_folder) + # delete the zip file + os.remove(realesrgan_zip_path) + # chmod 755 the exec if we're in a linux machine, otherwise we'd get permission errors + if current_user_os in ('Linux', 'Mac'): + os.chmod(realesrgan_exec_path, 0o755) + # enable running the exec for mac users + if current_user_os == 'Mac': + os.system(f'xattr -d com.apple.quarantine "{realesrgan_exec_path}"') + + except Exception as e: + raise Exception(f"Error while downloading {realesrgan_zip_path}. Please download from: {download_url}, and extract its contents into: {models_folder}/realesrgan_ncnn") + +def make_upscale_v2(upscale_factor, upscale_model, keep_imgs, imgs_raw_path, imgs_batch_id, deforum_models_path, current_user_os, ffmpeg_location, ffmpeg_crf, ffmpeg_preset, fps, stitch_from_frame, stitch_to_frame, audio_path, add_soundtrack, srt_path=None): + # get clean number from 'x2, x3' etc + clean_num_r_up_factor = extract_number(upscale_factor) + # set paths + realesrgan_ncnn_location = os.path.join(deforum_models_path, 'realesrgan_ncnn', 'realesrgan-ncnn-vulkan' + ('.exe' if current_user_os == 'Windows' else '')) + upscaled_folder_path = os.path.join(imgs_raw_path, f"{imgs_batch_id}_upscaled") + temp_folder_to_keep_raw_ims = os.path.join(upscaled_folder_path, 'temp_raw_imgs_to_upscale') + out_upscaled_mp4_path = os.path.join(imgs_raw_path, f"{imgs_batch_id}_Upscaled_{upscale_factor}.mp4") + # download upscaling model if needed + check_and_download_realesrgan_ncnn(deforum_models_path, current_user_os) + # make a folder with only the imgs we need to duplicate so we can call the ncnn with the folder syntax (quicker!) + duplicate_pngs_from_folder(from_folder=imgs_raw_path, to_folder=temp_folder_to_keep_raw_ims, img_batch_id=imgs_batch_id, orig_vid_name='Dummy') + # set dynamic cmd command + cmd = [realesrgan_ncnn_location, '-i', temp_folder_to_keep_raw_ims, '-o', upscaled_folder_path, '-s', str(clean_num_r_up_factor), '-n', upscale_model] + # msg to print - need it to hide that text later on (!) + msg_to_print = f"Upscaling raw output PNGs using {upscale_model} at {upscale_factor}..." 
+ # blink the msg in the cli until action is done + console.print(msg_to_print, style="blink yellow", end="") + start_time = time.time() + # make call to ncnn upscaling executble + process = subprocess.run(cmd, capture_output=True, check=True, text=True, cwd=(os.path.join(deforum_models_path, 'realesrgan_ncnn') if current_user_os == 'Mac' else None)) + print("\r" + " " * len(msg_to_print), end="", flush=True) + print(f"\r{msg_to_print}", flush=True) + print(f"\rUpscaling \033[0;32mdone\033[0m in {time.time() - start_time:.2f} seconds!", flush=True) + # set custom path for ffmpeg func below + upscaled_imgs_path_for_ffmpeg = os.path.join(upscaled_folder_path, f"{imgs_batch_id}_%09d.png") + # stitch video from upscaled pngs + ffmpeg_stitch_video(ffmpeg_location=ffmpeg_location, fps=fps, outmp4_path=out_upscaled_mp4_path, stitch_from_frame=stitch_from_frame, stitch_to_frame=stitch_to_frame, imgs_path=upscaled_imgs_path_for_ffmpeg, add_soundtrack=add_soundtrack, audio_path=audio_path, crf=ffmpeg_crf, preset=ffmpeg_preset, srt_path=srt_path) + + # delete the duplicated raw imgs + shutil.rmtree(temp_folder_to_keep_raw_ims) + + if not keep_imgs: + shutil.rmtree(upscaled_folder_path) +# NCNN Upscale section END \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/vid2depth.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/vid2depth.py new file mode 100644 index 0000000000000000000000000000000000000000..d05278e55030c0de2110c5c57c7c537c50a72848 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/vid2depth.py @@ -0,0 +1,242 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
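# Both upscaling entry points above reduce to the same shell call: hand the Real-ESRGAN NCNN
# binary an input folder, an output folder, a numeric scale and a model name. A stripped-down
# sketch of that call (paths and the model name are placeholders; the binary is assumed to be
# already downloaded by the helper above):

import os
import subprocess

def extract_scale(factor_str):
    # 'x2' -> 2, mirroring the helper used by the code above
    return int(factor_str[1:]) if len(factor_str) > 1 and factor_str[1:].isdigit() else -1

def ncnn_upscale_folder(binary_path, in_dir, out_dir, factor_str, model_name):
    os.makedirs(out_dir, exist_ok=True)
    cmd = [binary_path, '-i', in_dir, '-o', out_dir,
           '-s', str(extract_scale(factor_str)), '-n', model_name]
    # check=True raises CalledProcessError if the upscaler exits with a non-zero status
    subprocess.run(cmd, capture_output=True, check=True, text=True)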
+ +# Contact the authors: https://deforum.github.io/ + +# TODO: deduplicate upscaling/interp/vid2depth code + +import os, gc +import numpy as np +import cv2 +from pathlib import Path +from tqdm import tqdm +from PIL import Image, ImageOps, ImageChops +from modules.shared import cmd_opts, device as sh_device +from modules import devices +import shutil +from .frame_interpolation import clean_folder_name +from rife.inference_video import duplicate_pngs_from_folder +from .video_audio_utilities import get_quick_vid_info, vid2frames, ffmpeg_stitch_video + +def process_depth_vid_upload_logic(file, mode, thresholding, threshold_value, threshold_value_max, adapt_block_size, adapt_c, invert, end_blur, midas_weight_vid2depth, vid_file_name, keep_imgs, f_location, f_crf, f_preset, f_models_path): + print("got a request to *vid2depth* an existing video.") + + in_vid_fps, _, _ = get_quick_vid_info(file.name) + folder_name = clean_folder_name(Path(vid_file_name).stem) + outdir_no_tmp = os.path.join(os.getcwd(), 'outputs', 'frame-depth', folder_name) + i = 1 + while os.path.exists(outdir_no_tmp): + outdir_no_tmp = os.path.join(os.getcwd(), 'outputs', 'frame-depth', folder_name + '_' + str(i)) + i += 1 + + outdir = os.path.join(outdir_no_tmp, 'tmp_input_frames') + os.makedirs(outdir, exist_ok=True) + + vid2frames(video_path=file.name, video_in_frame_path=outdir, overwrite=True, extract_from_frame=0, extract_to_frame=-1, numeric_files_output=True, out_img_format='png') + + process_video_depth(mode, thresholding, threshold_value, threshold_value_max, adapt_block_size, adapt_c, invert, end_blur, midas_weight_vid2depth, orig_vid_fps=in_vid_fps, real_audio_track=file.name, raw_output_imgs_path=outdir, img_batch_id=None, ffmpeg_location=f_location, ffmpeg_crf=f_crf, ffmpeg_preset=f_preset, f_models_path=f_models_path, keep_depth_imgs=keep_imgs, orig_vid_name=folder_name) + +def process_video_depth(mode, thresholding, threshold_value, threshold_value_max, adapt_block_size, adapt_c, invert, end_blur, midas_weight_vid2depth, orig_vid_fps, real_audio_track, raw_output_imgs_path, img_batch_id, ffmpeg_location, ffmpeg_crf, ffmpeg_preset, f_models_path, keep_depth_imgs, orig_vid_name): + devices.torch_gc() + + print("Vid2depth progress (it's OK if it finishes before 100%):") + + upscaled_path = os.path.join(raw_output_imgs_path, 'depth_frames') + if orig_vid_name is not None: # upscaling a video (deforum or unrelated) + custom_upscale_path = "{}_{}".format(upscaled_path, orig_vid_name) + else: # upscaling after a deforum run: + custom_upscale_path = "{}_{}".format(upscaled_path, img_batch_id) + + temp_convert_raw_png_path = os.path.join(raw_output_imgs_path, "tmp_depth_folder") + duplicate_pngs_from_folder(raw_output_imgs_path, temp_convert_raw_png_path, img_batch_id, orig_vid_name) + + videogen = [] + for f in os.listdir(temp_convert_raw_png_path): + # double check for old _depth_ files, not really needed probably but keeping it for now + if '_depth_' not in f: + videogen.append(f) + + videogen.sort(key= lambda x:int(x.split('.')[0])) + vid_out = None + + if not os.path.exists(custom_upscale_path): + os.mkdir(custom_upscale_path) + + # Loading the chosen model + if 'Mixed' in mode: + model = (load_depth_model(f_models_path, midas_weight_vid2depth), load_anime_model()) + elif 'Depth' in mode: + model = load_depth_model(f_models_path, midas_weight_vid2depth) + elif 'Anime' in mode: + model = load_anime_model() + else: + model = None + + # Upscaling is a slow and demanding operation, so we don't need as much 
parallelization here + for i in tqdm(range(len(videogen)), desc="Vid2depth"): + lastframe = videogen[i] + img_path = os.path.join(temp_convert_raw_png_path, lastframe) + image = process_frame(model, Image.open(img_path).convert("RGB"), mode, thresholding, threshold_value, threshold_value_max, adapt_block_size, adapt_c, invert, end_blur, midas_weight_vid2depth) + filename = '{}/{:0>9d}.png'.format(custom_upscale_path, i) + image.save(filename) + + # Cleaning up and freeing the memory before stitching + model = None + gc.collect() + devices.torch_gc() + + shutil.rmtree(temp_convert_raw_png_path) + # stitch video from upscaled frames, and add audio if needed + try: + print (f"*Passing depth frames to ffmpeg...*") + vid_out_path = stitch_video(img_batch_id, orig_vid_fps, custom_upscale_path, real_audio_track, ffmpeg_location, mode, thresholding, threshold_value, threshold_value_max, adapt_block_size, adapt_c, invert, end_blur, midas_weight_vid2depth, ffmpeg_crf, ffmpeg_preset, keep_depth_imgs, orig_vid_name) + # remove folder with raw (non-upscaled) vid input frames in case of input VID and not PNGs + if orig_vid_name is not None: + shutil.rmtree(raw_output_imgs_path) + except Exception as e: + print(f'Video stitching gone wrong. *Vid2depth frames were saved to HD as backup!*. Actual error: {e}') + + gc.collect() + devices.torch_gc() + +def process_frame(model, image, mode, thresholding, threshold_value, threshold_value_max, adapt_block_size, adapt_c, invert, end_blur, midas_weight_vid2depth): + # Get grayscale foreground map + if 'None' in mode: + depth = process_depth(image, 'None', thresholding, threshold_value, threshold_value_max, adapt_block_size, adapt_c, invert, end_blur) + elif not 'Mixed' in mode: + depth = process_frame_depth(model, np.array(image), midas_weight_vid2depth) if 'Depth' in mode else process_frame_anime(model, np.array(image)) + depth = process_depth(depth, mode, thresholding, threshold_value, threshold_value_max, adapt_block_size, adapt_c, invert, end_blur) + else: + if thresholding == 'None': + raise "Mixed mode doesn't work with no thresholding!" 
+ depth_depth = process_frame_depth(model[0], np.array(image), midas_weight_vid2depth) + depth_depth = process_depth(depth_depth, 'Depth', thresholding, threshold_value, threshold_value_max, adapt_block_size, adapt_c, invert, end_blur) + anime_depth = process_frame_anime(model[1], np.array(image)) + anime_depth = process_depth(anime_depth, 'Anime', 'Simple', 32, 255, adapt_block_size, adapt_c, invert, end_blur) + depth = ImageChops.logical_or(depth_depth.convert('1'), anime_depth.convert('1')) + + return depth + +def process_depth(depth, mode, thresholding, threshold_value, threshold_value_max, adapt_block_size, adapt_c, invert, end_blur): + depth = depth.convert('L') + # Depth mode need inverting whereas Anime mode doesn't + # (invert and 'Depth' in mode) or (not invert and not 'Depth' in mode) + if (invert and 'None' in mode) or (invert is ('Depth' in mode)): + depth = ImageOps.invert(depth) + + depth = np.array(depth) + + # Apply thresholding + if thresholding == 'Simple': + _, depth = cv2.threshold(depth, threshold_value, threshold_value_max, cv2.THRESH_BINARY) + elif thresholding == 'Simple (Auto-value)': + _, depth = cv2.threshold(depth, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU) + elif thresholding == 'Adaptive (Mean)': + depth = cv2.adaptiveThreshold(depth, threshold_value_max, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, int(adapt_block_size), adapt_c) + elif thresholding == 'Adaptive (Gaussian)': + depth = cv2.adaptiveThreshold(depth, threshold_value_max, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, int(adapt_block_size), adapt_c) + + # Apply slight blur in the end to smoothen the edges after initial thresholding + if end_blur > 0: + depth = cv2.GaussianBlur(depth, (5, 5), end_blur) + + if thresholding == 'None' or end_blur == 0: + # Return a graymap + return Image.fromarray(depth).convert('L') + else: + # This commits thresholding again, but on the already processed image, so we don't need to set it up as much + return Image.fromarray(cv2.threshold(depth, 127, 255, cv2.THRESH_BINARY)[1]).convert('L') + +def stitch_video(img_batch_id, fps, img_folder_path, audio_path, ffmpeg_location, mode, thresholding, threshold_value, threshold_value_max, adapt_block_size, adapt_c, invert, end_blur, midas_weight_vid2depth, f_crf, f_preset, keep_imgs, orig_vid_name): + parent_folder = os.path.dirname(img_folder_path) + grandparent_folder = os.path.dirname(parent_folder) + mode = str(mode).replace('\\', '_').replace(' ', '_').replace('(', '_').replace(')', '_') + mp4_path = os.path.join(grandparent_folder, str(orig_vid_name if orig_vid_name is not None else img_batch_id) +'_depth_'+f"{thresholding}") + + mp4_path = mp4_path + '.mp4' + + t = os.path.join(img_folder_path, "%09d.png") + add_soundtrack = 'None' + if not audio_path is None: + add_soundtrack = 'File' + + exception_raised = False + try: + ffmpeg_stitch_video(ffmpeg_location=ffmpeg_location, fps=fps, outmp4_path=mp4_path, stitch_from_frame=0, stitch_to_frame=1000000, imgs_path=t, add_soundtrack=add_soundtrack, audio_path=audio_path, crf=f_crf, preset=f_preset) + except Exception as e: + exception_raised = True + print(f"An error occurred while stitching the video: {e}") + + if not exception_raised and not keep_imgs: + shutil.rmtree(img_folder_path) + + if (keep_imgs and orig_vid_name is not None) or (orig_vid_name is not None and exception_raised is True): + shutil.move(img_folder_path, grandparent_folder) + + return mp4_path + +# Midas/Adabins Depth mode with the usual workflow +def load_depth_model(models_path, 
midas_weight_vid2depth): + from .depth import DepthModel + device = ('cpu' if cmd_opts.lowvram or cmd_opts.medvram else sh_device) + keep_in_vram = False # TODO: Future - handle this too? + print('Loading Depth Model') + depth_model = DepthModel(models_path, device, not cmd_opts.no_half, keep_in_vram=keep_in_vram) + return depth_model + +# Anime Remove Background by skytnt and onnx model +# https://huggingface.co/spaces/skytnt/anime-remove-background/blob/main/app.py +def load_anime_model(): + # Installing its deps on demand + print('Checking ARB dependencies') + from launch import is_installed, run_pip + libs = ["onnx", "onnxruntime-gpu", "huggingface_hub"] + for lib in libs: + if not is_installed(lib): + run_pip(f"install {lib}", lib) + + try: + import onnxruntime as rt + import huggingface_hub + except Exception as e: + raise f"onnxruntime has not been installed correctly! Anime Remove Background mode is unable to function. The actual exception is: {e}. Note, that you'll need internet connection for the first run!" + + providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] + model_path = huggingface_hub.hf_hub_download("skytnt/anime-seg", "isnetis.onnx") + return rt.InferenceSession(model_path, providers=providers) + +def get_mask(rmbg_model, img, s=1024): + img = (img / 255).astype(np.float32) + h, w = h0, w0 = img.shape[:-1] + h, w = (s, int(s * w / h)) if h > w else (int(s * h / w), s) + ph, pw = s - h, s - w + img_input = np.zeros([s, s, 3], dtype=np.float32) + img_input[ph // 2:ph // 2 + h, pw // 2:pw // 2 + w] = cv2.resize(img, (w, h)) + img_input = np.transpose(img_input, (2, 0, 1)) + img_input = img_input[np.newaxis, :] + mask = rmbg_model.run(None, {'img': img_input})[0][0] + mask = np.transpose(mask, (1, 2, 0)) + mask = mask[ph // 2:ph // 2 + h, pw // 2:pw // 2 + w] + mask = cv2.resize(mask, (w0, h0)) + # TODO: pass in batches + mask = (mask * 255).astype(np.uint8) + return mask + +def process_frame_depth(depth_model, image, midas_weight): + opencv_image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) + depth = depth_model.predict(opencv_image, midas_weight, not cmd_opts.no_half) + return depth_model.to_image(depth) + +def process_frame_anime(model, image): + return Image.fromarray(get_mask(model, image), 'L') diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/video_audio_utilities.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/video_audio_utilities.py new file mode 100644 index 0000000000000000000000000000000000000000..8ed25f34f4cf944456926f0f73e4499f7e257156 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/video_audio_utilities.py @@ -0,0 +1,501 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
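# The heart of process_depth above is: optional invert, threshold, blur, re-threshold.
# A condensed sketch of the 'Simple (Auto-value)' (Otsu) path using the same cv2 calls;
# the function name and default arguments are illustrative:

import cv2
import numpy as np
from PIL import Image, ImageOps

def otsu_foreground_mask(gray_img, invert=True, end_blur=2.0):
    img = gray_img.convert('L')
    if invert:
        img = ImageOps.invert(img)
    arr = np.array(img)
    # pick the threshold value automatically from the histogram
    _, arr = cv2.threshold(arr, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    if end_blur > 0:
        # soften the jagged edges, then commit to a binary mask again
        arr = cv2.GaussianBlur(arr, (5, 5), end_blur)
        _, arr = cv2.threshold(arr, 127, 255, cv2.THRESH_BINARY)
    return Image.fromarray(arr).convert('L')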
+ +# Contact the authors: https://deforum.github.io/ + +import os +import cv2 +import shutil +import math +import requests +import subprocess +import time +import re +import glob +import concurrent.futures +from pkg_resources import resource_filename +from modules.shared import state, opts +from .general_utils import checksum, clean_gradio_path_strings, debug_print +from basicsr.utils.download_util import load_file_from_url +from .rich import console +import shutil +from threading import Thread + +def convert_image(input_path, output_path): + # Read the input image + img = cv2.imread(input_path) + # Get the file extension of the output path + out_ext = os.path.splitext(output_path)[1].lower() + # Convert the image to the specified output format + if out_ext == ".png": + cv2.imwrite(output_path, img, [cv2.IMWRITE_PNG_COMPRESSION, 9]) + elif out_ext == ".jpg" or out_ext == ".jpeg": + cv2.imwrite(output_path, img, [cv2.IMWRITE_JPEG_QUALITY, 99]) + elif out_ext == ".bmp": + cv2.imwrite(output_path, img) + else: + print(f"Unsupported output format: {out_ext}") + +def get_ffmpeg_params(): # get ffmpeg params from webui's settings -> deforum tab. actual opts are set in deforum.py + f_location = opts.data.get("deforum_ffmpeg_location", find_ffmpeg_binary()) + f_crf = opts.data.get("deforum_ffmpeg_crf", 17) + f_preset = opts.data.get("deforum_ffmpeg_preset", 'slow') + + return [f_location, f_crf, f_preset] + +def get_ffmpeg_paths(outdir, timestring, anim_args, video_args, output_suffix=''): + image_path = os.path.join(outdir, f"{timestring}_%09d.png") + mp4_path = os.path.join(outdir, f"{timestring}{output_suffix}.mp4") + + real_audio_track = None + if video_args.add_soundtrack != 'None': + real_audio_track = anim_args.video_init_path if video_args.add_soundtrack == 'Init Video' else video_args.soundtrack_path + + srt_path = None + if opts.data.get("deforum_save_gen_info_as_srt", False) and opts.data.get("deforum_embed_srt", False): + srt_path = os.path.join(outdir, f"{timestring}.srt") + + return [image_path, mp4_path, real_audio_track, srt_path] + +# e.g gets 'x2' returns just 2 as int +def extract_number(string): + return int(string[1:]) if len(string) > 1 and string[1:].isdigit() else -1 + +def save_frame(image, file_path): + cv2.imwrite(file_path, image) + +def vid2frames(video_path, video_in_frame_path, n=1, overwrite=True, extract_from_frame=0, extract_to_frame=-1, out_img_format='jpg', numeric_files_output = False): + start_time = time.time() + if (extract_to_frame <= extract_from_frame) and extract_to_frame != -1: + raise RuntimeError('Error: extract_to_frame can not be higher than extract_from_frame') + + if n < 1: n = 1 #HACK Gradio interface does not currently allow min/max in gr.Number(...) 
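# The frame-extraction loop of vid2frames (continued just below) boils down to: open the
# video, read frames sequentially, and save every n-th one with a zero-padded index. A
# minimal sketch of that loop, without the thread pool, interrupt handling and cache checks
# used by the real function (names are illustrative):

import cv2
import os

def extract_every_nth_frame(video_path, out_dir, n=1, img_format='jpg'):
    os.makedirs(out_dir, exist_ok=True)
    cap = cv2.VideoCapture(video_path)
    count = saved = 0
    ok, frame = cap.read()
    while ok:
        if count % n == 0:
            cv2.imwrite(os.path.join(out_dir, f"{saved:09}.{img_format}"), frame)
            saved += 1
        count += 1
        ok, frame = cap.read()
    cap.release()
    return saved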
+ + video_path = clean_gradio_path_strings(video_path) + # check vid path using a function and only enter if we get True + if is_vid_path_valid(video_path): + + name = get_frame_name(video_path) + + if not (video_path.startswith('http://') or video_path.startswith('https://')): + video_path = os.path.realpath(video_path) + + vidcap = cv2.VideoCapture(video_path) + video_fps = vidcap.get(cv2.CAP_PROP_FPS) + + input_content = [] + if os.path.exists(video_in_frame_path) : + input_content = os.listdir(video_in_frame_path) + + # check if existing frame is the same video, if not we need to erase it and repopulate + if len(input_content) > 0 and numeric_files_output is False: + #get the name of the existing frame + content_name = get_frame_name(input_content[0]) + if not content_name.startswith(name): + overwrite = True + + # grab the frame count to check against existing directory len + frame_count = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT)) + + # raise error if the user wants to skip more frames than exist + if n >= frame_count : + raise RuntimeError('Skipping more frames than input video contains. extract_nth_frames larger than input frames') + + expected_frame_count = math.ceil(frame_count / n) + # Check to see if the frame count is matches the number of files in path + if overwrite or expected_frame_count != len(input_content): + shutil.rmtree(video_in_frame_path) + os.makedirs(video_in_frame_path, exist_ok=True) # just deleted the folder so we need to make it again + input_content = os.listdir(video_in_frame_path) + + print(f"Trying to extract frames from video with input FPS of {video_fps}. Please wait patiently.") + if len(input_content) == 0: + vidcap.set(cv2.CAP_PROP_POS_FRAMES, extract_from_frame) # Set the starting frame + success,image = vidcap.read() + count = extract_from_frame + t=0 + success = True + max_workers = int(max(1, (os.cpu_count() / 2) - 1)) # set max threads to cpu cores halved, minus 1. minimum is 1 + with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor: + while success: + if state.interrupted: + return + if (count <= extract_to_frame or extract_to_frame == -1) and count % n == 0: + if numeric_files_output == True: + file_name = f"{t:09}.{out_img_format}" + else: + file_name = f"{name}{t:09}.{out_img_format}" + file_path = os.path.join(video_in_frame_path, file_name) + executor.submit(save_frame, image, file_path) + t += 1 + count += 1 + success, image = vidcap.read() + print(f"Extracted {count} frames from video in {time.time() - start_time:.2f} seconds!") + else: + print("Frames already unpacked") + vidcap.release() + return video_fps + +# make sure the video_path provided is an existing local file or a web URL with a supported file extension +def is_vid_path_valid(video_path): + # make sure file format is supported! + file_formats = ["mov", "mpeg", "mp4", "m4v", "avi", "mpg", "webm"] + extension = video_path.rsplit('.', 1)[-1].lower() + # vid path is actually a URL, check it + if video_path.startswith('http://') or video_path.startswith('https://'): + response = requests.head(video_path, allow_redirects=True) + if response.status_code == 404: + raise ConnectionError(f"Video URL {video_path} is not valid. Response status code: {response.status_code}") + elif response.status_code == 302: + response = requests.head(response.headers['location'], allow_redirects=True) + if response.status_code != 200: + raise ConnectionError(f"Video URL {video_path} is not valid. 
Response status code: {response.status_code}") + if extension not in file_formats: + raise ValueError(f"Video file {video_path} has format '{extension}', which not supported. Supported formats are: {file_formats}") + else: + video_path = os.path.realpath(video_path) + if not os.path.exists(video_path): + raise RuntimeError(f"Video path does not exist: {video_path}") + if extension not in file_formats: + raise ValueError(f"Video file {video_path} has format '{extension}', which is not supported. Supported formats are: {file_formats}") + return True + +# quick-retreive frame count, FPS and H/W dimensions of a video (local or URL-based) +def get_quick_vid_info(vid_path): + vidcap = cv2.VideoCapture(vid_path) + video_fps = vidcap.get(cv2.CAP_PROP_FPS) + video_frame_count = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT)) + video_width = int(vidcap.get(cv2.CAP_PROP_FRAME_WIDTH)) + video_height = int(vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + vidcap.release() + if video_fps.is_integer(): + video_fps = int(video_fps) + + return video_fps, video_frame_count, (video_width, video_height) + +# Stitch images to a h264 mp4 video using ffmpeg +def ffmpeg_stitch_video(ffmpeg_location=None, fps=None, outmp4_path=None, stitch_from_frame=0, stitch_to_frame=None, imgs_path=None, add_soundtrack=None, audio_path=None, crf=17, preset='veryslow', srt_path=None): + start_time = time.time() + + print(f"Got a request to stitch frames to video using FFmpeg.\nFrames:\n{imgs_path}\nTo Video:\n{outmp4_path}") + msg_to_print = f"Stitching *video*..." + console.print(msg_to_print, style="blink yellow", end="") + if stitch_to_frame == -1: + stitch_to_frame = 999999999 + try: + cmd = [ + ffmpeg_location, + '-y', + '-r', str(float(fps)), + '-start_number', str(stitch_from_frame), + '-i', imgs_path, + '-frames:v', str(stitch_to_frame), + '-c:v', 'libx264', + '-vf', + f'fps={float(fps)}', + '-pix_fmt', 'yuv420p', + '-crf', str(crf), + '-preset', preset, + '-pattern_type', 'sequence' + ] + cmd.append('-vcodec') + cmd.append('png' if imgs_path[0].find('.png') != -1 else 'libx264') + cmd.append(outmp4_path) + + process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) + stdout, stderr = process.communicate() + except FileNotFoundError: + print("\r" + " " * len(msg_to_print), end="", flush=True) + print(f"\r{msg_to_print}", flush=True) + raise FileNotFoundError("FFmpeg not found. Please make sure you have a working ffmpeg path under 'ffmpeg_location' parameter.") + except Exception as e: + print("\r" + " " * len(msg_to_print), end="", flush=True) + print(f"\r{msg_to_print}", flush=True) + raise Exception(f'Error stitching frames to video. Actual runtime error:{e}') + + add_soundtrack_status = None + add_soundtrack_success = None + if add_soundtrack != 'None': + try: + audio_path = clean_gradio_path_strings(audio_path) + audio_add_start_time = time.time() + cmd = [ + ffmpeg_location, + '-i', + outmp4_path, + '-i', + audio_path, + '-map', '0:v', + '-map', '1:a', + '-c:v', 'copy', + '-shortest', + outmp4_path+'.temp.mp4' + ] + process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) + stdout, stderr = process.communicate() + if process.returncode != 0: + raise RuntimeError(stderr) + os.replace(outmp4_path+'.temp.mp4', outmp4_path) + add_soundtrack_status = f"\rFFmpeg audio embedding \033[0;32mdone\033[0m in {time.time() - audio_add_start_time:.2f} seconds!" 
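# Stripped of the fallback logic and the audio/subtitle muxing that wraps around it, the core
# of ffmpeg_stitch_video is a single ffmpeg invocation over a printf-style image sequence.
# A hedged sketch of an equivalent call (function and argument names are illustrative):

import subprocess

def stitch_png_sequence(ffmpeg_location, imgs_pattern, out_mp4, fps=30, crf=17, preset='slow', start_number=0):
    # imgs_pattern is a printf-style sequence such as '/some/dir/frames_%09d.png'
    cmd = [
        ffmpeg_location, '-y',
        '-framerate', str(fps),
        '-start_number', str(start_number),
        '-i', imgs_pattern,
        '-c:v', 'libx264',
        '-pix_fmt', 'yuv420p',
        '-crf', str(crf),
        '-preset', preset,
        out_mp4,
    ]
    proc = subprocess.run(cmd, capture_output=True, text=True)
    if proc.returncode != 0:
        raise RuntimeError(f"ffmpeg failed: {proc.stderr}")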
+ add_soundtrack_success = True + except Exception as e: + add_soundtrack_status = f"\rError adding audio to video: {e}" + add_soundtrack_success = False + + add_srt = opts.data.get("deforum_save_gen_info_as_srt", False) and opts.data.get("deforum_embed_srt", False) and srt_path is not None + add_srt_status = None + add_srt_success = None + if add_srt: + try: + srt_add_start_time = time.time() + cmd = [ + ffmpeg_location, + '-i', outmp4_path, + '-i', srt_path, + '-c', 'copy', + '-c:s', 'mov_text', + '-metadata:s:s:0', 'title=Deforum Data', + outmp4_path+'.temp.mp4' + ] + process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) + stdout, stderr = process.communicate() + if process.returncode != 0: + raise RuntimeError(stderr) + os.replace(outmp4_path+'.temp.mp4', outmp4_path) + add_srt_status = f"\rFFmpeg subtitle embedding \033[0;32mdone\033[0m in {time.time() - srt_add_start_time:.2f} seconds!" + add_srt_success = True + except Exception as e: + add_srt_status = f"\rError adding subtitles to video: {e}" + add_srt_success = False + + print("\r" + " " * len(msg_to_print), end="", flush=True) + print(f"\r{msg_to_print}", flush=True) + + status_summary = f"\rVideo stitching \033[0;32mdone\033[0m in {time.time() - start_time:.2f} seconds!" + if add_soundtrack_status: + print(add_soundtrack_status, flush=True) + status_summary += " Audio embedded successfully." if add_soundtrack_success else " Sorry, no audio - see above for errors." + if add_srt_status: + print(add_srt_status, flush=True) + status_summary += " Subtitles embedded successfully." if add_srt_success else " Sorry, no subtitles - see above for errors." + + print(status_summary, flush=True) + +def get_frame_name(path): + name = os.path.basename(path) + name = os.path.splitext(name)[0] + return name + +def get_next_frame(outdir, video_path, frame_idx, mask=False): + frame_path = 'inputframes' + if (mask): frame_path = 'maskframes' + return os.path.join(outdir, frame_path, get_frame_name(video_path) + f"{frame_idx:09}.jpg") + +def find_ffmpeg_binary(): + try: + import google.colab + return 'ffmpeg' + except: + pass + for package in ['imageio_ffmpeg', 'imageio-ffmpeg']: + try: + package_path = resource_filename(package, 'binaries') + files = [os.path.join(package_path, f) for f in os.listdir(package_path) if f.startswith("ffmpeg-")] + files.sort(key=lambda x: os.path.getmtime(x), reverse=True) + return files[0] if files else 'ffmpeg' + except: + return 'ffmpeg' + +# These 2 functions belong to "stitch frames to video" in Output tab +def get_manual_frame_to_vid_output_path(input_path): + dir_name = os.path.dirname(input_path) + folder_name = os.path.basename(dir_name) + output_path = os.path.join(dir_name, f"{folder_name}.mp4") + i = 1 + while os.path.exists(output_path): + output_path = os.path.join(dir_name, f"{folder_name}_{i}.mp4") + i += 1 + return output_path + +def direct_stitch_vid_from_frames(image_path, fps, add_soundtrack, audio_path): + f_location, f_crf, f_preset = get_ffmpeg_params() + matching_files = glob.glob(re.sub(r'%\d*d', '*', image_path)) + min_id = None + for file in matching_files: + try: + id = int(re.search(r'(\d+)(?=\.\w+$)', file).group(1)) + min_id = min(min_id, id) if min_id is not None else id + except (AttributeError, ValueError): + pass + if min_id is None or not all(os.path.isfile(image_path % (min_id + i)) for i in range(2)): + print("Couldn't find images that match the provided path/ pattern. 
At least 2 matched images are required.") + return + out_mp4_path = get_manual_frame_to_vid_output_path(image_path) + ffmpeg_stitch_video(ffmpeg_location=f_location, fps=fps, outmp4_path=out_mp4_path, stitch_from_frame=min_id, stitch_to_frame=-1, imgs_path=image_path, add_soundtrack=add_soundtrack, audio_path=audio_path, crf=f_crf, preset=f_preset) +# end of 2 stitch frame to video funcs + +# returns True if filename (could be also media URL) contains an audio stream, othehrwise False +def media_file_has_audio(filename, ffmpeg_location): + result = subprocess.run([ffmpeg_location, "-i", filename, "-af", "volumedetect", "-f", "null", "-"], stdout=subprocess.DEVNULL, stderr=subprocess.PIPE) + output = result.stderr.decode() + return True if "Stream #0:1: Audio: " in output or "Stream #0:1(und): Audio" in output else False + +# download gifski binaries if needed - linux and windows only atm (apple users won't even see the option) +def check_and_download_gifski(models_folder, current_user_os): + if current_user_os == 'Windows': + file_name = 'gifski.exe' + checksum_value = 'b0dd261ad021c31c7fdb99db761b45165e6b2a7e8e09c5d070a2b8064b575d7a4976c364d8508b28a6940343119b16a23e9f7d76f1f3d5ff02289d3068b469cf' + download_url = 'https://github.com/hithereai/d/releases/download/giski-windows-bin/gifski.exe' + elif current_user_os == 'Linux': + file_name = 'gifski' + checksum_value = 'e65bf9502bca520a7fd373397e41078d5c73db12ec3e9b47458c282d076c04fa697adecb5debb5d37fc9cbbee0673bb95e78d92c1cf813b4f5cc1cabe96880ff' + download_url = 'https://github.com/hithereai/d/releases/download/gifski-linux-bin/gifski' + elif current_user_os == 'Mac': + file_name = 'gifski' + checksum_value = '622a65d25609677169ed2c1c53fd9aa496a98b357cf84d0c3627ae99c85a565d61ca42cdc4d24ed6d60403bb79b6866ce24f3c4b6fff58c4d27632264a96353c' + download_url = 'https://github.com/hithereai/d/releases/download/gifski-mac-bin/gifski' + else: # who are you then? + raise Exception(f"No support for OS type: {current_user_os}") + + file_path = os.path.join(models_folder, file_name) + + if not os.path.exists(file_path): + load_file_from_url(download_url, models_folder) + if current_user_os in ['Linux','Mac']: + os.chmod(file_path, 0o755) + if current_user_os == 'Mac': + # enable running the exec for mac users + os.system(f'xattr -d com.apple.quarantine "{file_path}"') + if checksum(file_path) != checksum_value: + raise Exception(f"Error while downloading {file_name}. Please download from: {download_url} and place in: {models_folder}") + +# create a gif using gifski - limited to up to 30 fps (from the ui; if users wanna try to hack it, results are not good, but possible up to 100 fps theoretically) +def make_gifski_gif(imgs_raw_path, imgs_batch_id, fps, models_folder, current_user_os): + msg_to_print = f"Stitching *gif* from frames using Gifski..." 
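# The binary downloads above are pinned to 128-hex-digit checksums, which matches the length
# of a SHA-512 digest; the checksum() helper itself lives in general_utils and is not shown
# in this diff, so the hash choice here is an assumption based on that digest length. A
# hedged sketch of such a verification step:

import hashlib

def sha512_of_file(path, chunk_size=1 << 20):
    h = hashlib.sha512()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            h.update(chunk)
    return h.hexdigest()

# usage: refuse to run a corrupted or truncated download
# if sha512_of_file(file_path) != checksum_value:
#     raise Exception(f"Bad checksum for {file_path}, please re-download")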
+ # blink the msg in the cli until action is done + console.print(msg_to_print, style="blink yellow", end="") + start_time = time.time() + gifski_location = os.path.join(models_folder, 'gifski' + ('.exe' if current_user_os == 'Windows' else '')) + final_gif_path = os.path.join(imgs_raw_path, imgs_batch_id + '.gif') + if current_user_os == "Linux": + input_img_pattern = imgs_batch_id + '_0*.png' + input_img_files = [os.path.join(imgs_raw_path, file) for file in sorted(glob.glob(os.path.join(imgs_raw_path, input_img_pattern)))] + cmd = [gifski_location, '-o', final_gif_path] + input_img_files + ['--fps', str(fps), '--quality', str(95)] + elif current_user_os == "Windows": + input_img_pattern_for_gifski = os.path.join(imgs_raw_path, imgs_batch_id + '_0*.png') + cmd = [gifski_location, '-o', final_gif_path, input_img_pattern_for_gifski, '--fps', str(fps), '--quality', str(95)] + else: # should never this else as we check before, but just in case + print("\r" + " " * len(msg_to_print), end="", flush=True) + print(f"\r{msg_to_print}", flush=True) + raise Exception(f"No support for OS type: {current_user_os}") + + check_and_download_gifski(models_folder, current_user_os) + + try: + process = subprocess.run(cmd, capture_output=True, check=True, text=True, cwd=(models_folder if current_user_os == 'Mac' else None)) + print("\r" + " " * len(msg_to_print), end="", flush=True) + print(f"\r{msg_to_print}", flush=True) + print(f"GIF stitching \033[0;32mdone\033[0m in {time.time() - start_time:.2f} seconds!") + except Exception as e: + print("\r" + " " * len(msg_to_print), end="", flush=True) + print(f"\r{msg_to_print}", flush=True) + print(f"GIF stitching *failed* with error:\n{e}") + +def handle_imgs_deletion(vid_path=None, imgs_folder_path=None, batch_id=None): + try: + total_imgs_to_delete = count_matching_frames(imgs_folder_path, batch_id) + if total_imgs_to_delete is None or total_imgs_to_delete == 0: + return + print("Deleting raw images, as requested:") + _, fcount, _ = get_quick_vid_info(vid_path) + if fcount == total_imgs_to_delete: + total_imgs_deleted = delete_matching_frames(imgs_folder_path, batch_id) + print(f"Deleted {total_imgs_deleted} out of {total_imgs_to_delete} imgs!") + else: + print("Did not delete imgs as there was a mismatch between # of frames in folder, and # of frames in actual video. Please check and delete manually. ") + except Exception as e: + print(f"Error deleting raw images. Please delete them manually if you want. Actual error:\n{e}") + +# handle deletion of inputframes created by video frame extraction +def handle_input_frames_deletion(imgs_folder_path=None): + try: + total_imgs_to_delete = count_matching_frames(imgs_folder_path, None) + if total_imgs_to_delete is None or total_imgs_to_delete == 0: + return + print("Deleting input frames, as requested:") + total_imgs_deleted = delete_input_frames(imgs_folder_path) + print(f"Deleted {total_imgs_deleted} out of {total_imgs_to_delete} inputframes!") + os.rmdir(imgs_folder_path) + except Exception as e: + print(f"Error deleting input frames. Please delete them manually if you want. 
Actual error:\n{e}") + +def handle_cn_frames_deletion(cn_input_frames_list): + try: + for cn_inputframes_folder in cn_input_frames_list: + if os.path.exists(cn_inputframes_folder): + total_cn_imgs_to_delete = count_matching_frames(cn_inputframes_folder, None) + if total_cn_imgs_to_delete is None or total_cn_imgs_to_delete == 0: + continue + total_imgs_deleted = delete_input_frames(cn_inputframes_folder) + print(f"Deleted {total_imgs_deleted} CN inputframes out of {total_cn_imgs_to_delete}!") + os.rmdir(cn_inputframes_folder) + except Exception as e: + print(f"Error deleting CN input frames. Please delete them manually if you want. Actual error:\n{e}") + +def delete_matching_frames(from_folder, img_batch_id): + return sum(1 for f in os.listdir(from_folder) if get_matching_frame(f, img_batch_id) and os.remove(os.path.join(from_folder, f)) is None) + +# delete inputframes +def delete_input_frames(from_folder): + return sum(1 for f in os.listdir(from_folder) if os.remove(os.path.join(from_folder, f)) is None) + +def count_matching_frames(from_folder, img_batch_id): + if str(from_folder).endswith("inputframes"): + return sum(1 for f in os.listdir(from_folder)) + return sum(1 for f in os.listdir(from_folder) if get_matching_frame(f, img_batch_id)) + +def get_matching_frame(f, img_batch_id=None): + return ('png' in f or 'jpg' in f) and '-' not in f and '_depth_' not in f and ((img_batch_id is not None and f.startswith(img_batch_id) or img_batch_id is None)) + +def render_preview(args, anim_args, video_args, root, frame_idx, last_preview_frame): + is_preview_on = "on" in opts.data.get("deforum_preview", "off").lower() + preview_interval_frames = opts.data.get("deforum_preview_interval_frames", 50) + is_preview_frame = (frame_idx % preview_interval_frames) == 0 or (frame_idx - last_preview_frame) >= preview_interval_frames + is_close_to_end = frame_idx >= (anim_args.max_frames-1) + + debug_print(f"render preview video: frame_idx={frame_idx} preview_interval_frames={preview_interval_frames} anim_args.max_frames={anim_args.max_frames} is_preview_on={is_preview_on} is_preview_frame={is_preview_frame} is_close_to_end={is_close_to_end} ") + + if not is_preview_on or not is_preview_frame or is_close_to_end: + debug_print(f"No preview video on frame {frame_idx}.") + return last_preview_frame + + f_location, f_crf, f_preset = get_ffmpeg_params() # get params for ffmpeg exec + image_path, mp4_temp_path, real_audio_track, srt_path = get_ffmpeg_paths(args.outdir, root.timestring, anim_args, video_args, "_preview__rendering__") + mp4_preview_path = mp4_temp_path.replace("_preview__rendering__", "_preview") + def task(): + if os.path.exists(mp4_temp_path): + print(f"--! 
Skipping preview video on frame {frame_idx} (previous preview still rendering to {mp4_temp_path}...") + else: + print(f"--> Rendering preview video up to frame {frame_idx} to {mp4_preview_path}...") + try: + ffmpeg_stitch_video(ffmpeg_location=f_location, fps=video_args.fps, outmp4_path=mp4_temp_path, stitch_from_frame=0, stitch_to_frame=frame_idx, imgs_path=image_path, add_soundtrack=video_args.add_soundtrack, audio_path=real_audio_track, crf=f_crf, preset=f_preset, srt_path=srt_path) + finally: + shutil.move(mp4_temp_path, mp4_preview_path) + + if "concurrent" in opts.data.get("deforum_preview", "off").lower(): + Thread(target=task).start() + else: + task() + + return frame_idx diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/webui_sd_pipeline.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/webui_sd_pipeline.py new file mode 100644 index 0000000000000000000000000000000000000000..05131ee449e77f9bcb36dad446746643459ec678 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/webui_sd_pipeline.py @@ -0,0 +1,53 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +# Contact the authors: https://deforum.github.io/ + +from modules.processing import StableDiffusionProcessingImg2Img +from modules.shared import opts, sd_model +import os + +def get_webui_sd_pipeline(args, root): + # Set up the pipeline + p = StableDiffusionProcessingImg2Img( + sd_model=sd_model, + outpath_samples = opts.outdir_samples or opts.outdir_img2img_samples, + ) # we'll set up the rest later + + os.makedirs(args.outdir, exist_ok=True) + p.width, p.height = map(lambda x: x - x % 8, (args.W, args.H)) + p.steps = args.steps + p.seed = args.seed + p.sampler_name = args.sampler + p.tiling = args.tiling + p.restore_faces = args.restore_faces + p.subseed = root.subseed + p.subseed_strength = root.subseed_strength + p.seed_resize_from_w = args.seed_resize_from_w + p.seed_resize_from_h = args.seed_resize_from_h + p.fill = args.fill + p.batch_size = 1 # b.size 1 as this is DEFORUM :) + p.seed = args.seed + p.do_not_save_samples = True # Setting this to False will trigger webui's saving mechanism - and we will end up with duplicated files, and another folder within our destination folder - big no no. 
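# Two small conversions in get_webui_sd_pipeline are easy to miss: the requested dimensions
# are rounded down to multiples of 8 just above (the underlying latent space downsamples by
# a factor of 8), and Deforum's "strength" becomes webui's denoising_strength = 1 - strength
# just below. A tiny illustrative check of both:

def floor_to_multiple_of_8(x: int) -> int:
    return x - x % 8

assert floor_to_multiple_of_8(1081) == 1080   # 1081x... request renders at 1080x...
assert floor_to_multiple_of_8(720) == 720     # already aligned, unchanged
assert abs((1 - 0.65) - 0.35) < 1e-9          # strength 0.65 -> denoising_strength 0.35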
+ p.sampler_name = args.sampler + p.mask_blur = args.mask_overlay_blur + p.extra_generation_params["Mask blur"] = args.mask_overlay_blur + p.n_iter = 1 + p.steps = args.steps + p.denoising_strength = 1 - args.strength + p.cfg_scale = args.scale + p.image_cfg_scale = args.pix2pix_img_cfg_scale + p.outpath_samples = args.outdir + + return p \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/word_masking.py b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/word_masking.py new file mode 100644 index 0000000000000000000000000000000000000000..ce2ca5ed07776eaea0d36fabfcb1a21b93f3a3d9 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/scripts/deforum_helpers/word_masking.py @@ -0,0 +1,62 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +# Contact the authors: https://deforum.github.io/ + +import os +import torch +from PIL import Image +from torchvision import transforms +from torch.nn.functional import interpolate +import cv2 + +preclipseg_transform = transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), + transforms.Resize((512, 512)), #TODO: check if the size is hardcoded +]) + +def find_clipseg(): + basedirs = [os.getcwd()] + src_basedirs = [] + for basedir in basedirs: + src_basedirs.append(os.path.join(os.path.sep.join(os.path.abspath(__file__).split(os.path.sep)[:-2]), 'deforum_helpers', 'src')) + + for basedir in src_basedirs: + pth = os.path.join(basedir, './clipseg/weights/rd64-uni.pth') + if os.path.exists(pth): + return pth + raise Exception('CLIPseg weights not found!') + +def setup_clipseg(root): + from clipseg.models.clipseg import CLIPDensePredT + model = CLIPDensePredT(version='ViT-B/16', reduce_dim=64) + model.eval() + model.load_state_dict(torch.load(find_clipseg(), map_location=root.device), strict=False) + + model.to(root.device) + root.clipseg_model = model + +def get_word_mask(root, frame, word_mask): + if root.clipseg_model is None: + setup_clipseg(root) + img = preclipseg_transform(frame).to(root.device, dtype=torch.float32) + word_masks = [word_mask] + with torch.no_grad(): + preds = root.clipseg_model(img.repeat(len(word_masks),1,1,1), word_masks)[0] + + mask = torch.sigmoid(preds[0][0]).unsqueeze(0).unsqueeze(0) # add batch, channels dims + resized_mask = interpolate(mask, size=(frame.size[1], frame.size[0]), mode='bicubic').squeeze() # rescale mask back to the target resolution + numpy_array = resized_mask.multiply(255).to(dtype=torch.uint8,device='cpu').numpy() + return Image.fromarray(cv2.threshold(numpy_array, 32, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]) diff --git a/extensions-builtin/sd-webui-deforum/style.css b/extensions-builtin/sd-webui-deforum/style.css new file mode 100644 index 0000000000000000000000000000000000000000..7d6467f6dabdd39487c099643391ce4e31f9843c --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/style.css @@ -0,0 +1,83 @@ +/* +# Copyright 
(C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +Contact the authors: https://deforum.github.io/ +*/ + +#vid_to_interpolate_chosen_file .w-full, #pics_to_interpolate_chosen_file .w-full, #vid_to_upscale_chosen_file .w-full, #controlnet_input_video_chosen_file .w-full, #controlnet_input_video_mask_chosen_file .w-full,#vid_to_depth_chosen_file .w-full { + display: flex !important; + align-items: flex-start !important; + justify-content: center !important; +} + +#tab_deforum_interface #hybrid_msg_html { + color: Tomato !important; + margin-top: 5px !important; + text-align: center !important; + font-size: 20px !important; + font-weight: bold !important; +} + +#tab_deforum_interface #leres_license_msg { + color: GoldenRod; +} + +#image_buttons_deforum #img2img_tab, +#image_buttons_deforum #inpaint_tab, +#image_buttons_deforum #extras_tab, +#save_zip_deforum, #save_deforum { + display: none !important; +} + +#main_top_info_accord .label-wrap { + gap:2px; + padding: 0.5rem; +} +#tab_deforum_interface #controlnet_not_found_html_msg, #tab_deforum_interface #depth_warp_msg_html { + color: Tomato; +} + +#below_interpolate_butts_msg { + text-align: center !important; +} + +#tab_deforum_interface #settings_path_msg { + margin: 0.6em; + display: flex; + align-items: flex-start; + justify-content: center; +} + +#tab_deforum_interface .tabs.gradio-tabs.svelte-1g805jl .svelte-vt1mxs.gap { + gap:4px !important; +} + +#tab_deforum_interface #main_top_info_accord { + padding: 1px; +} + +#add_soundtrack .svelte-1p9xokt { + padding: 2.25px; +} + +#tab_deforum_interface .wrap.svelte-xwlu1w, #custom_setting_file { + height: 85px !important; + min-height: 85px !important; +} + +#tab_deforum_interface .file-preview-holder { + overflow-y: auto; + max-height: 60px; +} \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/tests/__snapshots__/deforum_postprocess_test.ambr b/extensions-builtin/sd-webui-deforum/tests/__snapshots__/deforum_postprocess_test.ambr new file mode 100644 index 0000000000000000000000000000000000000000..c4738d19e66dba456ab9b4bb7c090b56a32afabf --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/tests/__snapshots__/deforum_postprocess_test.ambr @@ -0,0 +1,101 @@ +# serializer version: 1 +# name: test_post_process_FILM + ''' + 1 + 00:00:00,000 --> 00:00:00,050 + F#: 0; Cadence: false; Seed: 1; Angle: 0; Tr.C.X: 0.500; Tr.C.Y: 0.500; Zoom: 1.002; TrX: 0; TrY: 0; TrZ: 0; RotX: 0; RotY: 0; RotZ: 0; PerFlT: 0; PerFlP: 0; PerFlG: 0; PerFlFV: 53; Noise: 0.040; StrSch: 0.650; CtrstSch: 1; CFGSch: 7; P2PCfgSch: 1.500; SubSSch: 1; SubSStrSch: 0; CkptSch: model1.ckpt; StepsSch: 25; SeedSch: 1; SamplerSchedule: Euler a; ClipskipSchedule: 2; NoiseMultiplierSchedule: 1.050; MaskSchedule: {video_mask}; NoiseMaskSchedule: {video_mask}; AmountSchedule: 0.050; KernelSchedule: 5; SigmaSchedule: 1; ThresholdSchedule: 0; AspectRatioSchedule: 1; FieldOfViewSchedule: 70; NearSchedule: 200; CadenceFlowFactorSchedule: 1; RedoFlowFactorSchedule: 1; 
FarSchedule: 10000; HybridCompAlphaSchedule: 0.500; HybridCompMaskBlendAlphaSchedule: 0.500; HybridCompMaskContrastSchedule: 1; HybridCompMaskAutoContrastCutoffHighSchedule: 100; HybridCompMaskAutoContrastCutoffLowSchedule: 0; HybridFlowFactorSchedule: 1; Prompt: tiny cute swamp bunny, highly detailed, intricate, ultra hd, sharp photo, crepuscular rays, in focus, by tomasz alen kopera --neg + + 2 + 00:00:00,050 --> 00:00:00,100 + F#: 1; Cadence: false; Seed: 2; Angle: 0; Tr.C.X: 0.500; Tr.C.Y: 0.500; Zoom: 1.003; TrX: 0; TrY: 0; TrZ: 0; RotX: 0; RotY: 0; RotZ: 0; PerFlT: 0; PerFlP: 0; PerFlG: 0; PerFlFV: 53; Noise: 0.040; StrSch: 0.650; CtrstSch: 1; CFGSch: 7; P2PCfgSch: 1.500; SubSSch: 1; SubSStrSch: 0; CkptSch: model1.ckpt; StepsSch: 25; SeedSch: -1; SamplerSchedule: Euler a; ClipskipSchedule: 2; NoiseMultiplierSchedule: 1.050; MaskSchedule: {video_mask}; NoiseMaskSchedule: {video_mask}; AmountSchedule: 0.050; KernelSchedule: 5; SigmaSchedule: 1; ThresholdSchedule: 0; AspectRatioSchedule: 1; FieldOfViewSchedule: 70; NearSchedule: 200; CadenceFlowFactorSchedule: 1; RedoFlowFactorSchedule: 1; FarSchedule: 10000; HybridCompAlphaSchedule: 0.500; HybridCompMaskBlendAlphaSchedule: 0.500; HybridCompMaskContrastSchedule: 1; HybridCompMaskAutoContrastCutoffHighSchedule: 100; HybridCompMaskAutoContrastCutoffLowSchedule: 0; HybridFlowFactorSchedule: 1; Prompt: tiny cute swamp bunny, highly detailed, intricate, ultra hd, sharp photo, crepuscular rays, in focus, by tomasz alen kopera --neg + + 3 + 00:00:00,100 --> 00:00:00,150 + F#: 2; Cadence: false; Seed: 3; Angle: 0; Tr.C.X: 0.500; Tr.C.Y: 0.500; Zoom: 1.003; TrX: 0; TrY: 0; TrZ: 0; RotX: 0; RotY: 0; RotZ: 0; PerFlT: 0; PerFlP: 0; PerFlG: 0; PerFlFV: 53; Noise: 0.040; StrSch: 0.650; CtrstSch: 1; CFGSch: 7; P2PCfgSch: 1.500; SubSSch: 1; SubSStrSch: 0; CkptSch: model1.ckpt; StepsSch: 25; SeedSch: -1; SamplerSchedule: Euler a; ClipskipSchedule: 2; NoiseMultiplierSchedule: 1.050; MaskSchedule: {video_mask}; NoiseMaskSchedule: {video_mask}; AmountSchedule: 0.050; KernelSchedule: 5; SigmaSchedule: 1; ThresholdSchedule: 0; AspectRatioSchedule: 1; FieldOfViewSchedule: 70; NearSchedule: 200; CadenceFlowFactorSchedule: 1; RedoFlowFactorSchedule: 1; FarSchedule: 10000; HybridCompAlphaSchedule: 0.500; HybridCompMaskBlendAlphaSchedule: 0.500; HybridCompMaskContrastSchedule: 1; HybridCompMaskAutoContrastCutoffHighSchedule: 100; HybridCompMaskAutoContrastCutoffLowSchedule: 0; HybridFlowFactorSchedule: 1; Prompt: tiny cute swamp bunny, highly detailed, intricate, ultra hd, sharp photo, crepuscular rays, in focus, by tomasz alen kopera --neg + + 4 + 00:00:00,150 --> 00:00:00,200 + F#: 3; Cadence: false; Seed: 4; Angle: 0; Tr.C.X: 0.500; Tr.C.Y: 0.500; Zoom: 1.003; TrX: 0; TrY: 0; TrZ: 0; RotX: 0; RotY: 0; RotZ: 0; PerFlT: 0; PerFlP: 0; PerFlG: 0; PerFlFV: 53; Noise: 0.040; StrSch: 0.650; CtrstSch: 1; CFGSch: 7; P2PCfgSch: 1.500; SubSSch: 1; SubSStrSch: 0; CkptSch: model1.ckpt; StepsSch: 25; SeedSch: 1; SamplerSchedule: Euler a; ClipskipSchedule: 2; NoiseMultiplierSchedule: 1.050; MaskSchedule: {video_mask}; NoiseMaskSchedule: {video_mask}; AmountSchedule: 0.050; KernelSchedule: 5; SigmaSchedule: 1; ThresholdSchedule: 0; AspectRatioSchedule: 1; FieldOfViewSchedule: 70; NearSchedule: 200; CadenceFlowFactorSchedule: 1; RedoFlowFactorSchedule: 1; FarSchedule: 10000; HybridCompAlphaSchedule: 0.500; HybridCompMaskBlendAlphaSchedule: 0.500; HybridCompMaskContrastSchedule: 1; HybridCompMaskAutoContrastCutoffHighSchedule: 100; HybridCompMaskAutoContrastCutoffLowSchedule: 
0; HybridFlowFactorSchedule: 1; Prompt: tiny cute swamp bunny, highly detailed, intricate, ultra hd, sharp photo, crepuscular rays, in focus, by tomasz alen kopera --neg + + 5 + 00:00:00,200 --> 00:00:00,250 + F#: 4; Cadence: false; Seed: 5; Angle: 0; Tr.C.X: 0.500; Tr.C.Y: 0.500; Zoom: 1.003; TrX: 0; TrY: 0; TrZ: 0; RotX: 0; RotY: 0; RotZ: 0; PerFlT: 0; PerFlP: 0; PerFlG: 0; PerFlFV: 53; Noise: 0.040; StrSch: 0.650; CtrstSch: 1; CFGSch: 7; P2PCfgSch: 1.500; SubSSch: 1; SubSStrSch: 0; CkptSch: model1.ckpt; StepsSch: 25; SeedSch: 1; SamplerSchedule: Euler a; ClipskipSchedule: 2; NoiseMultiplierSchedule: 1.050; MaskSchedule: {video_mask}; NoiseMaskSchedule: {video_mask}; AmountSchedule: 0.050; KernelSchedule: 5; SigmaSchedule: 1; ThresholdSchedule: 0; AspectRatioSchedule: 1; FieldOfViewSchedule: 70; NearSchedule: 200; CadenceFlowFactorSchedule: 1; RedoFlowFactorSchedule: 1; FarSchedule: 10000; HybridCompAlphaSchedule: 0.500; HybridCompMaskBlendAlphaSchedule: 0.500; HybridCompMaskContrastSchedule: 1; HybridCompMaskAutoContrastCutoffHighSchedule: 100; HybridCompMaskAutoContrastCutoffLowSchedule: 0; HybridFlowFactorSchedule: 1; Prompt: tiny cute swamp bunny, highly detailed, intricate, ultra hd, sharp photo, crepuscular rays, in focus, by tomasz alen kopera --neg + + + ''' +# --- +# name: test_post_process_RIFE + ''' + 1 + 00:00:00,000 --> 00:00:00,050 + F#: 0; Cadence: false; Seed: 1; Angle: 0; Tr.C.X: 0.500; Tr.C.Y: 0.500; Zoom: 1.002; TrX: 0; TrY: 0; TrZ: 0; RotX: 0; RotY: 0; RotZ: 0; PerFlT: 0; PerFlP: 0; PerFlG: 0; PerFlFV: 53; Noise: 0.040; StrSch: 0.650; CtrstSch: 1; CFGSch: 7; P2PCfgSch: 1.500; SubSSch: 1; SubSStrSch: 0; CkptSch: model1.ckpt; StepsSch: 25; SeedSch: 1; SamplerSchedule: Euler a; ClipskipSchedule: 2; NoiseMultiplierSchedule: 1.050; MaskSchedule: {video_mask}; NoiseMaskSchedule: {video_mask}; AmountSchedule: 0.050; KernelSchedule: 5; SigmaSchedule: 1; ThresholdSchedule: 0; AspectRatioSchedule: 1; FieldOfViewSchedule: 70; NearSchedule: 200; CadenceFlowFactorSchedule: 1; RedoFlowFactorSchedule: 1; FarSchedule: 10000; HybridCompAlphaSchedule: 0.500; HybridCompMaskBlendAlphaSchedule: 0.500; HybridCompMaskContrastSchedule: 1; HybridCompMaskAutoContrastCutoffHighSchedule: 100; HybridCompMaskAutoContrastCutoffLowSchedule: 0; HybridFlowFactorSchedule: 1; Prompt: tiny cute swamp bunny, highly detailed, intricate, ultra hd, sharp photo, crepuscular rays, in focus, by tomasz alen kopera --neg + + 2 + 00:00:00,050 --> 00:00:00,100 + F#: 1; Cadence: false; Seed: 2; Angle: 0; Tr.C.X: 0.500; Tr.C.Y: 0.500; Zoom: 1.003; TrX: 0; TrY: 0; TrZ: 0; RotX: 0; RotY: 0; RotZ: 0; PerFlT: 0; PerFlP: 0; PerFlG: 0; PerFlFV: 53; Noise: 0.040; StrSch: 0.650; CtrstSch: 1; CFGSch: 7; P2PCfgSch: 1.500; SubSSch: 1; SubSStrSch: 0; CkptSch: model1.ckpt; StepsSch: 25; SeedSch: -1; SamplerSchedule: Euler a; ClipskipSchedule: 2; NoiseMultiplierSchedule: 1.050; MaskSchedule: {video_mask}; NoiseMaskSchedule: {video_mask}; AmountSchedule: 0.050; KernelSchedule: 5; SigmaSchedule: 1; ThresholdSchedule: 0; AspectRatioSchedule: 1; FieldOfViewSchedule: 70; NearSchedule: 200; CadenceFlowFactorSchedule: 1; RedoFlowFactorSchedule: 1; FarSchedule: 10000; HybridCompAlphaSchedule: 0.500; HybridCompMaskBlendAlphaSchedule: 0.500; HybridCompMaskContrastSchedule: 1; HybridCompMaskAutoContrastCutoffHighSchedule: 100; HybridCompMaskAutoContrastCutoffLowSchedule: 0; HybridFlowFactorSchedule: 1; Prompt: tiny cute swamp bunny, highly detailed, intricate, ultra hd, sharp photo, crepuscular rays, in focus, by tomasz alen kopera --neg + + 
3 + 00:00:00,100 --> 00:00:00,150 + F#: 2; Cadence: false; Seed: 3; Angle: 0; Tr.C.X: 0.500; Tr.C.Y: 0.500; Zoom: 1.003; TrX: 0; TrY: 0; TrZ: 0; RotX: 0; RotY: 0; RotZ: 0; PerFlT: 0; PerFlP: 0; PerFlG: 0; PerFlFV: 53; Noise: 0.040; StrSch: 0.650; CtrstSch: 1; CFGSch: 7; P2PCfgSch: 1.500; SubSSch: 1; SubSStrSch: 0; CkptSch: model1.ckpt; StepsSch: 25; SeedSch: -1; SamplerSchedule: Euler a; ClipskipSchedule: 2; NoiseMultiplierSchedule: 1.050; MaskSchedule: {video_mask}; NoiseMaskSchedule: {video_mask}; AmountSchedule: 0.050; KernelSchedule: 5; SigmaSchedule: 1; ThresholdSchedule: 0; AspectRatioSchedule: 1; FieldOfViewSchedule: 70; NearSchedule: 200; CadenceFlowFactorSchedule: 1; RedoFlowFactorSchedule: 1; FarSchedule: 10000; HybridCompAlphaSchedule: 0.500; HybridCompMaskBlendAlphaSchedule: 0.500; HybridCompMaskContrastSchedule: 1; HybridCompMaskAutoContrastCutoffHighSchedule: 100; HybridCompMaskAutoContrastCutoffLowSchedule: 0; HybridFlowFactorSchedule: 1; Prompt: tiny cute swamp bunny, highly detailed, intricate, ultra hd, sharp photo, crepuscular rays, in focus, by tomasz alen kopera --neg + + 4 + 00:00:00,150 --> 00:00:00,200 + F#: 3; Cadence: false; Seed: 4; Angle: 0; Tr.C.X: 0.500; Tr.C.Y: 0.500; Zoom: 1.003; TrX: 0; TrY: 0; TrZ: 0; RotX: 0; RotY: 0; RotZ: 0; PerFlT: 0; PerFlP: 0; PerFlG: 0; PerFlFV: 53; Noise: 0.040; StrSch: 0.650; CtrstSch: 1; CFGSch: 7; P2PCfgSch: 1.500; SubSSch: 1; SubSStrSch: 0; CkptSch: model1.ckpt; StepsSch: 25; SeedSch: 1; SamplerSchedule: Euler a; ClipskipSchedule: 2; NoiseMultiplierSchedule: 1.050; MaskSchedule: {video_mask}; NoiseMaskSchedule: {video_mask}; AmountSchedule: 0.050; KernelSchedule: 5; SigmaSchedule: 1; ThresholdSchedule: 0; AspectRatioSchedule: 1; FieldOfViewSchedule: 70; NearSchedule: 200; CadenceFlowFactorSchedule: 1; RedoFlowFactorSchedule: 1; FarSchedule: 10000; HybridCompAlphaSchedule: 0.500; HybridCompMaskBlendAlphaSchedule: 0.500; HybridCompMaskContrastSchedule: 1; HybridCompMaskAutoContrastCutoffHighSchedule: 100; HybridCompMaskAutoContrastCutoffLowSchedule: 0; HybridFlowFactorSchedule: 1; Prompt: tiny cute swamp bunny, highly detailed, intricate, ultra hd, sharp photo, crepuscular rays, in focus, by tomasz alen kopera --neg + + 5 + 00:00:00,200 --> 00:00:00,250 + F#: 4; Cadence: false; Seed: 5; Angle: 0; Tr.C.X: 0.500; Tr.C.Y: 0.500; Zoom: 1.003; TrX: 0; TrY: 0; TrZ: 0; RotX: 0; RotY: 0; RotZ: 0; PerFlT: 0; PerFlP: 0; PerFlG: 0; PerFlFV: 53; Noise: 0.040; StrSch: 0.650; CtrstSch: 1; CFGSch: 7; P2PCfgSch: 1.500; SubSSch: 1; SubSStrSch: 0; CkptSch: model1.ckpt; StepsSch: 25; SeedSch: 1; SamplerSchedule: Euler a; ClipskipSchedule: 2; NoiseMultiplierSchedule: 1.050; MaskSchedule: {video_mask}; NoiseMaskSchedule: {video_mask}; AmountSchedule: 0.050; KernelSchedule: 5; SigmaSchedule: 1; ThresholdSchedule: 0; AspectRatioSchedule: 1; FieldOfViewSchedule: 70; NearSchedule: 200; CadenceFlowFactorSchedule: 1; RedoFlowFactorSchedule: 1; FarSchedule: 10000; HybridCompAlphaSchedule: 0.500; HybridCompMaskBlendAlphaSchedule: 0.500; HybridCompMaskContrastSchedule: 1; HybridCompMaskAutoContrastCutoffHighSchedule: 100; HybridCompMaskAutoContrastCutoffLowSchedule: 0; HybridFlowFactorSchedule: 1; Prompt: tiny cute swamp bunny, highly detailed, intricate, ultra hd, sharp photo, crepuscular rays, in focus, by tomasz alen kopera --neg + + + ''' +# --- +# name: test_post_process_UPSCALE + ''' + 1 + 00:00:00,000 --> 00:00:00,050 + F#: 0; Cadence: false; Seed: 1; Angle: 0; Tr.C.X: 0.500; Tr.C.Y: 0.500; Zoom: 1.002; TrX: 0; TrY: 0; TrZ: 0; RotX: 0; RotY: 0; RotZ: 
0; PerFlT: 0; PerFlP: 0; PerFlG: 0; PerFlFV: 53; Noise: 0.040; StrSch: 0.650; CtrstSch: 1; CFGSch: 7; P2PCfgSch: 1.500; SubSSch: 1; SubSStrSch: 0; CkptSch: model1.ckpt; StepsSch: 25; SeedSch: 1; SamplerSchedule: Euler a; ClipskipSchedule: 2; NoiseMultiplierSchedule: 1.050; MaskSchedule: {video_mask}; NoiseMaskSchedule: {video_mask}; AmountSchedule: 0.050; KernelSchedule: 5; SigmaSchedule: 1; ThresholdSchedule: 0; AspectRatioSchedule: 1; FieldOfViewSchedule: 70; NearSchedule: 200; CadenceFlowFactorSchedule: 1; RedoFlowFactorSchedule: 1; FarSchedule: 10000; HybridCompAlphaSchedule: 0.500; HybridCompMaskBlendAlphaSchedule: 0.500; HybridCompMaskContrastSchedule: 1; HybridCompMaskAutoContrastCutoffHighSchedule: 100; HybridCompMaskAutoContrastCutoffLowSchedule: 0; HybridFlowFactorSchedule: 1; Prompt: tiny cute swamp bunny, highly detailed, intricate, ultra hd, sharp photo, crepuscular rays, in focus, by tomasz alen kopera --neg + + 2 + 00:00:00,050 --> 00:00:00,100 + F#: 1; Cadence: false; Seed: 2; Angle: 0; Tr.C.X: 0.500; Tr.C.Y: 0.500; Zoom: 1.003; TrX: 0; TrY: 0; TrZ: 0; RotX: 0; RotY: 0; RotZ: 0; PerFlT: 0; PerFlP: 0; PerFlG: 0; PerFlFV: 53; Noise: 0.040; StrSch: 0.650; CtrstSch: 1; CFGSch: 7; P2PCfgSch: 1.500; SubSSch: 1; SubSStrSch: 0; CkptSch: model1.ckpt; StepsSch: 25; SeedSch: -1; SamplerSchedule: Euler a; ClipskipSchedule: 2; NoiseMultiplierSchedule: 1.050; MaskSchedule: {video_mask}; NoiseMaskSchedule: {video_mask}; AmountSchedule: 0.050; KernelSchedule: 5; SigmaSchedule: 1; ThresholdSchedule: 0; AspectRatioSchedule: 1; FieldOfViewSchedule: 70; NearSchedule: 200; CadenceFlowFactorSchedule: 1; RedoFlowFactorSchedule: 1; FarSchedule: 10000; HybridCompAlphaSchedule: 0.500; HybridCompMaskBlendAlphaSchedule: 0.500; HybridCompMaskContrastSchedule: 1; HybridCompMaskAutoContrastCutoffHighSchedule: 100; HybridCompMaskAutoContrastCutoffLowSchedule: 0; HybridFlowFactorSchedule: 1; Prompt: tiny cute swamp bunny, highly detailed, intricate, ultra hd, sharp photo, crepuscular rays, in focus, by tomasz alen kopera --neg + + 3 + 00:00:00,100 --> 00:00:00,150 + F#: 2; Cadence: false; Seed: 3; Angle: 0; Tr.C.X: 0.500; Tr.C.Y: 0.500; Zoom: 1.003; TrX: 0; TrY: 0; TrZ: 0; RotX: 0; RotY: 0; RotZ: 0; PerFlT: 0; PerFlP: 0; PerFlG: 0; PerFlFV: 53; Noise: 0.040; StrSch: 0.650; CtrstSch: 1; CFGSch: 7; P2PCfgSch: 1.500; SubSSch: 1; SubSStrSch: 0; CkptSch: model1.ckpt; StepsSch: 25; SeedSch: -1; SamplerSchedule: Euler a; ClipskipSchedule: 2; NoiseMultiplierSchedule: 1.050; MaskSchedule: {video_mask}; NoiseMaskSchedule: {video_mask}; AmountSchedule: 0.050; KernelSchedule: 5; SigmaSchedule: 1; ThresholdSchedule: 0; AspectRatioSchedule: 1; FieldOfViewSchedule: 70; NearSchedule: 200; CadenceFlowFactorSchedule: 1; RedoFlowFactorSchedule: 1; FarSchedule: 10000; HybridCompAlphaSchedule: 0.500; HybridCompMaskBlendAlphaSchedule: 0.500; HybridCompMaskContrastSchedule: 1; HybridCompMaskAutoContrastCutoffHighSchedule: 100; HybridCompMaskAutoContrastCutoffLowSchedule: 0; HybridFlowFactorSchedule: 1; Prompt: tiny cute swamp bunny, highly detailed, intricate, ultra hd, sharp photo, crepuscular rays, in focus, by tomasz alen kopera --neg + + 4 + 00:00:00,150 --> 00:00:00,200 + F#: 3; Cadence: false; Seed: 4; Angle: 0; Tr.C.X: 0.500; Tr.C.Y: 0.500; Zoom: 1.003; TrX: 0; TrY: 0; TrZ: 0; RotX: 0; RotY: 0; RotZ: 0; PerFlT: 0; PerFlP: 0; PerFlG: 0; PerFlFV: 53; Noise: 0.040; StrSch: 0.650; CtrstSch: 1; CFGSch: 7; P2PCfgSch: 1.500; SubSSch: 1; SubSStrSch: 0; CkptSch: model1.ckpt; StepsSch: 25; SeedSch: 1; SamplerSchedule: Euler a; 
ClipskipSchedule: 2; NoiseMultiplierSchedule: 1.050; MaskSchedule: {video_mask}; NoiseMaskSchedule: {video_mask}; AmountSchedule: 0.050; KernelSchedule: 5; SigmaSchedule: 1; ThresholdSchedule: 0; AspectRatioSchedule: 1; FieldOfViewSchedule: 70; NearSchedule: 200; CadenceFlowFactorSchedule: 1; RedoFlowFactorSchedule: 1; FarSchedule: 10000; HybridCompAlphaSchedule: 0.500; HybridCompMaskBlendAlphaSchedule: 0.500; HybridCompMaskContrastSchedule: 1; HybridCompMaskAutoContrastCutoffHighSchedule: 100; HybridCompMaskAutoContrastCutoffLowSchedule: 0; HybridFlowFactorSchedule: 1; Prompt: tiny cute swamp bunny, highly detailed, intricate, ultra hd, sharp photo, crepuscular rays, in focus, by tomasz alen kopera --neg + + 5 + 00:00:00,200 --> 00:00:00,250 + F#: 4; Cadence: false; Seed: 5; Angle: 0; Tr.C.X: 0.500; Tr.C.Y: 0.500; Zoom: 1.003; TrX: 0; TrY: 0; TrZ: 0; RotX: 0; RotY: 0; RotZ: 0; PerFlT: 0; PerFlP: 0; PerFlG: 0; PerFlFV: 53; Noise: 0.040; StrSch: 0.650; CtrstSch: 1; CFGSch: 7; P2PCfgSch: 1.500; SubSSch: 1; SubSStrSch: 0; CkptSch: model1.ckpt; StepsSch: 25; SeedSch: 1; SamplerSchedule: Euler a; ClipskipSchedule: 2; NoiseMultiplierSchedule: 1.050; MaskSchedule: {video_mask}; NoiseMaskSchedule: {video_mask}; AmountSchedule: 0.050; KernelSchedule: 5; SigmaSchedule: 1; ThresholdSchedule: 0; AspectRatioSchedule: 1; FieldOfViewSchedule: 70; NearSchedule: 200; CadenceFlowFactorSchedule: 1; RedoFlowFactorSchedule: 1; FarSchedule: 10000; HybridCompAlphaSchedule: 0.500; HybridCompMaskBlendAlphaSchedule: 0.500; HybridCompMaskContrastSchedule: 1; HybridCompMaskAutoContrastCutoffHighSchedule: 100; HybridCompMaskAutoContrastCutoffLowSchedule: 0; HybridFlowFactorSchedule: 1; Prompt: tiny cute swamp bunny, highly detailed, intricate, ultra hd, sharp photo, crepuscular rays, in focus, by tomasz alen kopera --neg + + + ''' +# --- +# name: test_post_process_UPSCALE_FILM + ''' + 1 + 00:00:00,000 --> 00:00:00,050 + F#: 0; Cadence: false; Seed: 1; Angle: 0; Tr.C.X: 0.500; Tr.C.Y: 0.500; Zoom: 1.002; TrX: 0; TrY: 0; TrZ: 0; RotX: 0; RotY: 0; RotZ: 0; PerFlT: 0; PerFlP: 0; PerFlG: 0; PerFlFV: 53; Noise: 0.040; StrSch: 0.650; CtrstSch: 1; CFGSch: 7; P2PCfgSch: 1.500; SubSSch: 1; SubSStrSch: 0; CkptSch: model1.ckpt; StepsSch: 25; SeedSch: 1; SamplerSchedule: Euler a; ClipskipSchedule: 2; NoiseMultiplierSchedule: 1.050; MaskSchedule: {video_mask}; NoiseMaskSchedule: {video_mask}; AmountSchedule: 0.050; KernelSchedule: 5; SigmaSchedule: 1; ThresholdSchedule: 0; AspectRatioSchedule: 1; FieldOfViewSchedule: 70; NearSchedule: 200; CadenceFlowFactorSchedule: 1; RedoFlowFactorSchedule: 1; FarSchedule: 10000; HybridCompAlphaSchedule: 0.500; HybridCompMaskBlendAlphaSchedule: 0.500; HybridCompMaskContrastSchedule: 1; HybridCompMaskAutoContrastCutoffHighSchedule: 100; HybridCompMaskAutoContrastCutoffLowSchedule: 0; HybridFlowFactorSchedule: 1; Prompt: tiny cute swamp bunny, highly detailed, intricate, ultra hd, sharp photo, crepuscular rays, in focus, by tomasz alen kopera --neg + + 2 + 00:00:00,050 --> 00:00:00,100 + F#: 1; Cadence: false; Seed: 2; Angle: 0; Tr.C.X: 0.500; Tr.C.Y: 0.500; Zoom: 1.003; TrX: 0; TrY: 0; TrZ: 0; RotX: 0; RotY: 0; RotZ: 0; PerFlT: 0; PerFlP: 0; PerFlG: 0; PerFlFV: 53; Noise: 0.040; StrSch: 0.650; CtrstSch: 1; CFGSch: 7; P2PCfgSch: 1.500; SubSSch: 1; SubSStrSch: 0; CkptSch: model1.ckpt; StepsSch: 25; SeedSch: -1; SamplerSchedule: Euler a; ClipskipSchedule: 2; NoiseMultiplierSchedule: 1.050; MaskSchedule: {video_mask}; NoiseMaskSchedule: {video_mask}; AmountSchedule: 0.050; KernelSchedule: 5; 
SigmaSchedule: 1; ThresholdSchedule: 0; AspectRatioSchedule: 1; FieldOfViewSchedule: 70; NearSchedule: 200; CadenceFlowFactorSchedule: 1; RedoFlowFactorSchedule: 1; FarSchedule: 10000; HybridCompAlphaSchedule: 0.500; HybridCompMaskBlendAlphaSchedule: 0.500; HybridCompMaskContrastSchedule: 1; HybridCompMaskAutoContrastCutoffHighSchedule: 100; HybridCompMaskAutoContrastCutoffLowSchedule: 0; HybridFlowFactorSchedule: 1; Prompt: tiny cute swamp bunny, highly detailed, intricate, ultra hd, sharp photo, crepuscular rays, in focus, by tomasz alen kopera --neg + + 3 + 00:00:00,100 --> 00:00:00,150 + F#: 2; Cadence: false; Seed: 3; Angle: 0; Tr.C.X: 0.500; Tr.C.Y: 0.500; Zoom: 1.003; TrX: 0; TrY: 0; TrZ: 0; RotX: 0; RotY: 0; RotZ: 0; PerFlT: 0; PerFlP: 0; PerFlG: 0; PerFlFV: 53; Noise: 0.040; StrSch: 0.650; CtrstSch: 1; CFGSch: 7; P2PCfgSch: 1.500; SubSSch: 1; SubSStrSch: 0; CkptSch: model1.ckpt; StepsSch: 25; SeedSch: -1; SamplerSchedule: Euler a; ClipskipSchedule: 2; NoiseMultiplierSchedule: 1.050; MaskSchedule: {video_mask}; NoiseMaskSchedule: {video_mask}; AmountSchedule: 0.050; KernelSchedule: 5; SigmaSchedule: 1; ThresholdSchedule: 0; AspectRatioSchedule: 1; FieldOfViewSchedule: 70; NearSchedule: 200; CadenceFlowFactorSchedule: 1; RedoFlowFactorSchedule: 1; FarSchedule: 10000; HybridCompAlphaSchedule: 0.500; HybridCompMaskBlendAlphaSchedule: 0.500; HybridCompMaskContrastSchedule: 1; HybridCompMaskAutoContrastCutoffHighSchedule: 100; HybridCompMaskAutoContrastCutoffLowSchedule: 0; HybridFlowFactorSchedule: 1; Prompt: tiny cute swamp bunny, highly detailed, intricate, ultra hd, sharp photo, crepuscular rays, in focus, by tomasz alen kopera --neg + + 4 + 00:00:00,150 --> 00:00:00,200 + F#: 3; Cadence: false; Seed: 4; Angle: 0; Tr.C.X: 0.500; Tr.C.Y: 0.500; Zoom: 1.003; TrX: 0; TrY: 0; TrZ: 0; RotX: 0; RotY: 0; RotZ: 0; PerFlT: 0; PerFlP: 0; PerFlG: 0; PerFlFV: 53; Noise: 0.040; StrSch: 0.650; CtrstSch: 1; CFGSch: 7; P2PCfgSch: 1.500; SubSSch: 1; SubSStrSch: 0; CkptSch: model1.ckpt; StepsSch: 25; SeedSch: 1; SamplerSchedule: Euler a; ClipskipSchedule: 2; NoiseMultiplierSchedule: 1.050; MaskSchedule: {video_mask}; NoiseMaskSchedule: {video_mask}; AmountSchedule: 0.050; KernelSchedule: 5; SigmaSchedule: 1; ThresholdSchedule: 0; AspectRatioSchedule: 1; FieldOfViewSchedule: 70; NearSchedule: 200; CadenceFlowFactorSchedule: 1; RedoFlowFactorSchedule: 1; FarSchedule: 10000; HybridCompAlphaSchedule: 0.500; HybridCompMaskBlendAlphaSchedule: 0.500; HybridCompMaskContrastSchedule: 1; HybridCompMaskAutoContrastCutoffHighSchedule: 100; HybridCompMaskAutoContrastCutoffLowSchedule: 0; HybridFlowFactorSchedule: 1; Prompt: tiny cute swamp bunny, highly detailed, intricate, ultra hd, sharp photo, crepuscular rays, in focus, by tomasz alen kopera --neg + + 5 + 00:00:00,200 --> 00:00:00,250 + F#: 4; Cadence: false; Seed: 5; Angle: 0; Tr.C.X: 0.500; Tr.C.Y: 0.500; Zoom: 1.003; TrX: 0; TrY: 0; TrZ: 0; RotX: 0; RotY: 0; RotZ: 0; PerFlT: 0; PerFlP: 0; PerFlG: 0; PerFlFV: 53; Noise: 0.040; StrSch: 0.650; CtrstSch: 1; CFGSch: 7; P2PCfgSch: 1.500; SubSSch: 1; SubSStrSch: 0; CkptSch: model1.ckpt; StepsSch: 25; SeedSch: 1; SamplerSchedule: Euler a; ClipskipSchedule: 2; NoiseMultiplierSchedule: 1.050; MaskSchedule: {video_mask}; NoiseMaskSchedule: {video_mask}; AmountSchedule: 0.050; KernelSchedule: 5; SigmaSchedule: 1; ThresholdSchedule: 0; AspectRatioSchedule: 1; FieldOfViewSchedule: 70; NearSchedule: 200; CadenceFlowFactorSchedule: 1; RedoFlowFactorSchedule: 1; FarSchedule: 10000; HybridCompAlphaSchedule: 0.500; 
HybridCompMaskBlendAlphaSchedule: 0.500; HybridCompMaskContrastSchedule: 1; HybridCompMaskAutoContrastCutoffHighSchedule: 100; HybridCompMaskAutoContrastCutoffLowSchedule: 0; HybridFlowFactorSchedule: 1; Prompt: tiny cute swamp bunny, highly detailed, intricate, ultra hd, sharp photo, crepuscular rays, in focus, by tomasz alen kopera --neg + + + ''' +# --- diff --git a/extensions-builtin/sd-webui-deforum/tests/__snapshots__/deforum_test.ambr b/extensions-builtin/sd-webui-deforum/tests/__snapshots__/deforum_test.ambr new file mode 100644 index 0000000000000000000000000000000000000000..1928bc3a912e1e4d148e6739f5569e6e437d2731 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/tests/__snapshots__/deforum_test.ambr @@ -0,0 +1,101 @@ +# serializer version: 1 +# name: test_3d_mode + ''' + 1 + 00:00:00,000 --> 00:00:00,050 + F#: 0; Cadence: false; Seed: 1; Angle: 0; Tr.C.X: 0.500; Tr.C.Y: 0.500; Zoom: 1.002; TrX: 0; TrY: 0; TrZ: 0; RotX: 0; RotY: 0; RotZ: 0; PerFlT: 0; PerFlP: 0; PerFlG: 0; PerFlFV: 53; Noise: 0.040; StrSch: 0.650; CtrstSch: 1; CFGSch: 7; P2PCfgSch: 1.500; SubSSch: 1; SubSStrSch: 0; CkptSch: model1.ckpt; StepsSch: 25; SeedSch: 1; SamplerSchedule: Euler a; ClipskipSchedule: 2; NoiseMultiplierSchedule: 1.050; MaskSchedule: {video_mask}; NoiseMaskSchedule: {video_mask}; AmountSchedule: 0.050; KernelSchedule: 5; SigmaSchedule: 1; ThresholdSchedule: 0; AspectRatioSchedule: 1; FieldOfViewSchedule: 70; NearSchedule: 200; CadenceFlowFactorSchedule: 1; RedoFlowFactorSchedule: 1; FarSchedule: 10000; HybridCompAlphaSchedule: 0.500; HybridCompMaskBlendAlphaSchedule: 0.500; HybridCompMaskContrastSchedule: 1; HybridCompMaskAutoContrastCutoffHighSchedule: 100; HybridCompMaskAutoContrastCutoffLowSchedule: 0; HybridFlowFactorSchedule: 1; Prompt: tiny cute swamp bunny, highly detailed, intricate, ultra hd, sharp photo, crepuscular rays, in focus, by tomasz alen kopera --neg + + 2 + 00:00:00,050 --> 00:00:00,100 + F#: 1; Cadence: false; Seed: 2; Angle: 0; Tr.C.X: 0.500; Tr.C.Y: 0.500; Zoom: 1.003; TrX: 0; TrY: 0; TrZ: 0; RotX: 0; RotY: 0; RotZ: 0; PerFlT: 0; PerFlP: 0; PerFlG: 0; PerFlFV: 53; Noise: 0.040; StrSch: 0.650; CtrstSch: 1; CFGSch: 7; P2PCfgSch: 1.500; SubSSch: 1; SubSStrSch: 0; CkptSch: model1.ckpt; StepsSch: 25; SeedSch: -1; SamplerSchedule: Euler a; ClipskipSchedule: 2; NoiseMultiplierSchedule: 1.050; MaskSchedule: {video_mask}; NoiseMaskSchedule: {video_mask}; AmountSchedule: 0.050; KernelSchedule: 5; SigmaSchedule: 1; ThresholdSchedule: 0; AspectRatioSchedule: 1; FieldOfViewSchedule: 70; NearSchedule: 200; CadenceFlowFactorSchedule: 1; RedoFlowFactorSchedule: 1; FarSchedule: 10000; HybridCompAlphaSchedule: 0.500; HybridCompMaskBlendAlphaSchedule: 0.500; HybridCompMaskContrastSchedule: 1; HybridCompMaskAutoContrastCutoffHighSchedule: 100; HybridCompMaskAutoContrastCutoffLowSchedule: 0; HybridFlowFactorSchedule: 1; Prompt: tiny cute swamp bunny, highly detailed, intricate, ultra hd, sharp photo, crepuscular rays, in focus, by tomasz alen kopera --neg + + 3 + 00:00:00,100 --> 00:00:00,150 + F#: 2; Cadence: false; Seed: 3; Angle: 0; Tr.C.X: 0.500; Tr.C.Y: 0.500; Zoom: 1.003; TrX: 0; TrY: 0; TrZ: 0; RotX: 0; RotY: 0; RotZ: 0; PerFlT: 0; PerFlP: 0; PerFlG: 0; PerFlFV: 53; Noise: 0.040; StrSch: 0.650; CtrstSch: 1; CFGSch: 7; P2PCfgSch: 1.500; SubSSch: 1; SubSStrSch: 0; CkptSch: model1.ckpt; StepsSch: 25; SeedSch: -1; SamplerSchedule: Euler a; ClipskipSchedule: 2; NoiseMultiplierSchedule: 1.050; MaskSchedule: {video_mask}; NoiseMaskSchedule: {video_mask}; AmountSchedule: 0.050; 
KernelSchedule: 5; SigmaSchedule: 1; ThresholdSchedule: 0; AspectRatioSchedule: 1; FieldOfViewSchedule: 70; NearSchedule: 200; CadenceFlowFactorSchedule: 1; RedoFlowFactorSchedule: 1; FarSchedule: 10000; HybridCompAlphaSchedule: 0.500; HybridCompMaskBlendAlphaSchedule: 0.500; HybridCompMaskContrastSchedule: 1; HybridCompMaskAutoContrastCutoffHighSchedule: 100; HybridCompMaskAutoContrastCutoffLowSchedule: 0; HybridFlowFactorSchedule: 1; Prompt: tiny cute swamp bunny, highly detailed, intricate, ultra hd, sharp photo, crepuscular rays, in focus, by tomasz alen kopera --neg + + 4 + 00:00:00,150 --> 00:00:00,200 + F#: 3; Cadence: false; Seed: 4; Angle: 0; Tr.C.X: 0.500; Tr.C.Y: 0.500; Zoom: 1.003; TrX: 0; TrY: 0; TrZ: 0; RotX: 0; RotY: 0; RotZ: 0; PerFlT: 0; PerFlP: 0; PerFlG: 0; PerFlFV: 53; Noise: 0.040; StrSch: 0.650; CtrstSch: 1; CFGSch: 7; P2PCfgSch: 1.500; SubSSch: 1; SubSStrSch: 0; CkptSch: model1.ckpt; StepsSch: 25; SeedSch: 1; SamplerSchedule: Euler a; ClipskipSchedule: 2; NoiseMultiplierSchedule: 1.050; MaskSchedule: {video_mask}; NoiseMaskSchedule: {video_mask}; AmountSchedule: 0.050; KernelSchedule: 5; SigmaSchedule: 1; ThresholdSchedule: 0; AspectRatioSchedule: 1; FieldOfViewSchedule: 70; NearSchedule: 200; CadenceFlowFactorSchedule: 1; RedoFlowFactorSchedule: 1; FarSchedule: 10000; HybridCompAlphaSchedule: 0.500; HybridCompMaskBlendAlphaSchedule: 0.500; HybridCompMaskContrastSchedule: 1; HybridCompMaskAutoContrastCutoffHighSchedule: 100; HybridCompMaskAutoContrastCutoffLowSchedule: 0; HybridFlowFactorSchedule: 1; Prompt: tiny cute swamp bunny, highly detailed, intricate, ultra hd, sharp photo, crepuscular rays, in focus, by tomasz alen kopera --neg + + 5 + 00:00:00,200 --> 00:00:00,250 + F#: 4; Cadence: false; Seed: 5; Angle: 0; Tr.C.X: 0.500; Tr.C.Y: 0.500; Zoom: 1.003; TrX: 0; TrY: 0; TrZ: 0; RotX: 0; RotY: 0; RotZ: 0; PerFlT: 0; PerFlP: 0; PerFlG: 0; PerFlFV: 53; Noise: 0.040; StrSch: 0.650; CtrstSch: 1; CFGSch: 7; P2PCfgSch: 1.500; SubSSch: 1; SubSStrSch: 0; CkptSch: model1.ckpt; StepsSch: 25; SeedSch: 1; SamplerSchedule: Euler a; ClipskipSchedule: 2; NoiseMultiplierSchedule: 1.050; MaskSchedule: {video_mask}; NoiseMaskSchedule: {video_mask}; AmountSchedule: 0.050; KernelSchedule: 5; SigmaSchedule: 1; ThresholdSchedule: 0; AspectRatioSchedule: 1; FieldOfViewSchedule: 70; NearSchedule: 200; CadenceFlowFactorSchedule: 1; RedoFlowFactorSchedule: 1; FarSchedule: 10000; HybridCompAlphaSchedule: 0.500; HybridCompMaskBlendAlphaSchedule: 0.500; HybridCompMaskContrastSchedule: 1; HybridCompMaskAutoContrastCutoffHighSchedule: 100; HybridCompMaskAutoContrastCutoffLowSchedule: 0; HybridFlowFactorSchedule: 1; Prompt: tiny cute swamp bunny, highly detailed, intricate, ultra hd, sharp photo, crepuscular rays, in focus, by tomasz alen kopera --neg + + + ''' +# --- +# name: test_simple_settings + ''' + 1 + 00:00:00,000 --> 00:00:00,050 + F#: 0; Cadence: false; Seed: 1; Angle: 0; Tr.C.X: 0.500; Tr.C.Y: 0.500; Zoom: 1.002; TrX: 0; TrY: 0; TrZ: 0; RotX: 0; RotY: 0; RotZ: 0; PerFlT: 0; PerFlP: 0; PerFlG: 0; PerFlFV: 53; Noise: 0.040; StrSch: 0.650; CtrstSch: 1; CFGSch: 7; P2PCfgSch: 1.500; SubSSch: 1; SubSStrSch: 0; CkptSch: model1.ckpt; StepsSch: 25; SeedSch: 1; SamplerSchedule: Euler a; ClipskipSchedule: 2; NoiseMultiplierSchedule: 1.050; MaskSchedule: {video_mask}; NoiseMaskSchedule: {video_mask}; AmountSchedule: 0.050; KernelSchedule: 5; SigmaSchedule: 1; ThresholdSchedule: 0; AspectRatioSchedule: 1; FieldOfViewSchedule: 70; NearSchedule: 200; CadenceFlowFactorSchedule: 1; 
RedoFlowFactorSchedule: 1; FarSchedule: 10000; HybridCompAlphaSchedule: 0.500; HybridCompMaskBlendAlphaSchedule: 0.500; HybridCompMaskContrastSchedule: 1; HybridCompMaskAutoContrastCutoffHighSchedule: 100; HybridCompMaskAutoContrastCutoffLowSchedule: 0; HybridFlowFactorSchedule: 1; Prompt: tiny cute swamp bunny, highly detailed, intricate, ultra hd, sharp photo, crepuscular rays, in focus, by tomasz alen kopera --neg + + 2 + 00:00:00,050 --> 00:00:00,100 + F#: 1; Cadence: false; Seed: 2; Angle: 0; Tr.C.X: 0.500; Tr.C.Y: 0.500; Zoom: 1.003; TrX: 0; TrY: 0; TrZ: 0; RotX: 0; RotY: 0; RotZ: 0; PerFlT: 0; PerFlP: 0; PerFlG: 0; PerFlFV: 53; Noise: 0.040; StrSch: 0.650; CtrstSch: 1; CFGSch: 7; P2PCfgSch: 1.500; SubSSch: 1; SubSStrSch: 0; CkptSch: model1.ckpt; StepsSch: 25; SeedSch: -1; SamplerSchedule: Euler a; ClipskipSchedule: 2; NoiseMultiplierSchedule: 1.050; MaskSchedule: {video_mask}; NoiseMaskSchedule: {video_mask}; AmountSchedule: 0.050; KernelSchedule: 5; SigmaSchedule: 1; ThresholdSchedule: 0; AspectRatioSchedule: 1; FieldOfViewSchedule: 70; NearSchedule: 200; CadenceFlowFactorSchedule: 1; RedoFlowFactorSchedule: 1; FarSchedule: 10000; HybridCompAlphaSchedule: 0.500; HybridCompMaskBlendAlphaSchedule: 0.500; HybridCompMaskContrastSchedule: 1; HybridCompMaskAutoContrastCutoffHighSchedule: 100; HybridCompMaskAutoContrastCutoffLowSchedule: 0; HybridFlowFactorSchedule: 1; Prompt: tiny cute swamp bunny, highly detailed, intricate, ultra hd, sharp photo, crepuscular rays, in focus, by tomasz alen kopera --neg + + 3 + 00:00:00,100 --> 00:00:00,150 + F#: 2; Cadence: false; Seed: 3; Angle: 0; Tr.C.X: 0.500; Tr.C.Y: 0.500; Zoom: 1.003; TrX: 0; TrY: 0; TrZ: 0; RotX: 0; RotY: 0; RotZ: 0; PerFlT: 0; PerFlP: 0; PerFlG: 0; PerFlFV: 53; Noise: 0.040; StrSch: 0.650; CtrstSch: 1; CFGSch: 7; P2PCfgSch: 1.500; SubSSch: 1; SubSStrSch: 0; CkptSch: model1.ckpt; StepsSch: 25; SeedSch: -1; SamplerSchedule: Euler a; ClipskipSchedule: 2; NoiseMultiplierSchedule: 1.050; MaskSchedule: {video_mask}; NoiseMaskSchedule: {video_mask}; AmountSchedule: 0.050; KernelSchedule: 5; SigmaSchedule: 1; ThresholdSchedule: 0; AspectRatioSchedule: 1; FieldOfViewSchedule: 70; NearSchedule: 200; CadenceFlowFactorSchedule: 1; RedoFlowFactorSchedule: 1; FarSchedule: 10000; HybridCompAlphaSchedule: 0.500; HybridCompMaskBlendAlphaSchedule: 0.500; HybridCompMaskContrastSchedule: 1; HybridCompMaskAutoContrastCutoffHighSchedule: 100; HybridCompMaskAutoContrastCutoffLowSchedule: 0; HybridFlowFactorSchedule: 1; Prompt: tiny cute swamp bunny, highly detailed, intricate, ultra hd, sharp photo, crepuscular rays, in focus, by tomasz alen kopera --neg + + 4 + 00:00:00,150 --> 00:00:00,200 + F#: 3; Cadence: false; Seed: 4; Angle: 0; Tr.C.X: 0.500; Tr.C.Y: 0.500; Zoom: 1.003; TrX: 0; TrY: 0; TrZ: 0; RotX: 0; RotY: 0; RotZ: 0; PerFlT: 0; PerFlP: 0; PerFlG: 0; PerFlFV: 53; Noise: 0.040; StrSch: 0.650; CtrstSch: 1; CFGSch: 7; P2PCfgSch: 1.500; SubSSch: 1; SubSStrSch: 0; CkptSch: model1.ckpt; StepsSch: 25; SeedSch: 1; SamplerSchedule: Euler a; ClipskipSchedule: 2; NoiseMultiplierSchedule: 1.050; MaskSchedule: {video_mask}; NoiseMaskSchedule: {video_mask}; AmountSchedule: 0.050; KernelSchedule: 5; SigmaSchedule: 1; ThresholdSchedule: 0; AspectRatioSchedule: 1; FieldOfViewSchedule: 70; NearSchedule: 200; CadenceFlowFactorSchedule: 1; RedoFlowFactorSchedule: 1; FarSchedule: 10000; HybridCompAlphaSchedule: 0.500; HybridCompMaskBlendAlphaSchedule: 0.500; HybridCompMaskContrastSchedule: 1; HybridCompMaskAutoContrastCutoffHighSchedule: 100; 
HybridCompMaskAutoContrastCutoffLowSchedule: 0; HybridFlowFactorSchedule: 1; Prompt: tiny cute swamp bunny, highly detailed, intricate, ultra hd, sharp photo, crepuscular rays, in focus, by tomasz alen kopera --neg + + 5 + 00:00:00,200 --> 00:00:00,250 + F#: 4; Cadence: false; Seed: 5; Angle: 0; Tr.C.X: 0.500; Tr.C.Y: 0.500; Zoom: 1.003; TrX: 0; TrY: 0; TrZ: 0; RotX: 0; RotY: 0; RotZ: 0; PerFlT: 0; PerFlP: 0; PerFlG: 0; PerFlFV: 53; Noise: 0.040; StrSch: 0.650; CtrstSch: 1; CFGSch: 7; P2PCfgSch: 1.500; SubSSch: 1; SubSStrSch: 0; CkptSch: model1.ckpt; StepsSch: 25; SeedSch: 1; SamplerSchedule: Euler a; ClipskipSchedule: 2; NoiseMultiplierSchedule: 1.050; MaskSchedule: {video_mask}; NoiseMaskSchedule: {video_mask}; AmountSchedule: 0.050; KernelSchedule: 5; SigmaSchedule: 1; ThresholdSchedule: 0; AspectRatioSchedule: 1; FieldOfViewSchedule: 70; NearSchedule: 200; CadenceFlowFactorSchedule: 1; RedoFlowFactorSchedule: 1; FarSchedule: 10000; HybridCompAlphaSchedule: 0.500; HybridCompMaskBlendAlphaSchedule: 0.500; HybridCompMaskContrastSchedule: 1; HybridCompMaskAutoContrastCutoffHighSchedule: 100; HybridCompMaskAutoContrastCutoffLowSchedule: 0; HybridFlowFactorSchedule: 1; Prompt: tiny cute swamp bunny, highly detailed, intricate, ultra hd, sharp photo, crepuscular rays, in focus, by tomasz alen kopera --neg + + + ''' +# --- +# name: test_with_hybrid_video + ''' + 1 + 00:00:00,000 --> 00:00:00,050 + F#: 0; Cadence: false; Seed: 1; Angle: 0; Tr.C.X: 0.500; Tr.C.Y: 0.500; Zoom: 1.002; TrX: 0; TrY: 0; TrZ: 0; RotX: 0; RotY: 0; RotZ: 0; PerFlT: 0; PerFlP: 0; PerFlG: 0; PerFlFV: 53; Noise: 0.040; StrSch: 0.650; CtrstSch: 1; CFGSch: 7; P2PCfgSch: 1.500; SubSSch: 1; SubSStrSch: 0; CkptSch: model1.ckpt; StepsSch: 25; SeedSch: 1; SamplerSchedule: Euler a; ClipskipSchedule: 2; NoiseMultiplierSchedule: 1.050; MaskSchedule: {video_mask}; NoiseMaskSchedule: {video_mask}; AmountSchedule: 0.050; KernelSchedule: 5; SigmaSchedule: 1; ThresholdSchedule: 0; AspectRatioSchedule: 1; FieldOfViewSchedule: 70; NearSchedule: 200; CadenceFlowFactorSchedule: 1; RedoFlowFactorSchedule: 1; FarSchedule: 10000; HybridCompAlphaSchedule: 0.500; HybridCompMaskBlendAlphaSchedule: 0.500; HybridCompMaskContrastSchedule: 1; HybridCompMaskAutoContrastCutoffHighSchedule: 100; HybridCompMaskAutoContrastCutoffLowSchedule: 0; HybridFlowFactorSchedule: 1; Prompt: tiny cute swamp bunny, highly detailed, intricate, ultra hd, sharp photo, crepuscular rays, in focus, by tomasz alen kopera --neg + + 2 + 00:00:00,050 --> 00:00:00,100 + F#: 1; Cadence: false; Seed: 2; Angle: 0; Tr.C.X: 0.500; Tr.C.Y: 0.500; Zoom: 1.003; TrX: 0; TrY: 0; TrZ: 0; RotX: 0; RotY: 0; RotZ: 0; PerFlT: 0; PerFlP: 0; PerFlG: 0; PerFlFV: 53; Noise: 0.040; StrSch: 0.650; CtrstSch: 1; CFGSch: 7; P2PCfgSch: 1.500; SubSSch: 1; SubSStrSch: 0; CkptSch: model1.ckpt; StepsSch: 25; SeedSch: -1; SamplerSchedule: Euler a; ClipskipSchedule: 2; NoiseMultiplierSchedule: 1.050; MaskSchedule: {video_mask}; NoiseMaskSchedule: {video_mask}; AmountSchedule: 0.050; KernelSchedule: 5; SigmaSchedule: 1; ThresholdSchedule: 0; AspectRatioSchedule: 1; FieldOfViewSchedule: 70; NearSchedule: 200; CadenceFlowFactorSchedule: 1; RedoFlowFactorSchedule: 1; FarSchedule: 10000; HybridCompAlphaSchedule: 0.500; HybridCompMaskBlendAlphaSchedule: 0.500; HybridCompMaskContrastSchedule: 1; HybridCompMaskAutoContrastCutoffHighSchedule: 100; HybridCompMaskAutoContrastCutoffLowSchedule: 0; HybridFlowFactorSchedule: 1; Prompt: tiny cute swamp bunny, highly detailed, intricate, ultra hd, sharp photo, crepuscular 
rays, in focus, by tomasz alen kopera --neg + + 3 + 00:00:00,100 --> 00:00:00,150 + F#: 2; Cadence: false; Seed: 3; Angle: 0; Tr.C.X: 0.500; Tr.C.Y: 0.500; Zoom: 1.003; TrX: 0; TrY: 0; TrZ: 0; RotX: 0; RotY: 0; RotZ: 0; PerFlT: 0; PerFlP: 0; PerFlG: 0; PerFlFV: 53; Noise: 0.040; StrSch: 0.650; CtrstSch: 1; CFGSch: 7; P2PCfgSch: 1.500; SubSSch: 1; SubSStrSch: 0; CkptSch: model1.ckpt; StepsSch: 25; SeedSch: -1; SamplerSchedule: Euler a; ClipskipSchedule: 2; NoiseMultiplierSchedule: 1.050; MaskSchedule: {video_mask}; NoiseMaskSchedule: {video_mask}; AmountSchedule: 0.050; KernelSchedule: 5; SigmaSchedule: 1; ThresholdSchedule: 0; AspectRatioSchedule: 1; FieldOfViewSchedule: 70; NearSchedule: 200; CadenceFlowFactorSchedule: 1; RedoFlowFactorSchedule: 1; FarSchedule: 10000; HybridCompAlphaSchedule: 0.500; HybridCompMaskBlendAlphaSchedule: 0.500; HybridCompMaskContrastSchedule: 1; HybridCompMaskAutoContrastCutoffHighSchedule: 100; HybridCompMaskAutoContrastCutoffLowSchedule: 0; HybridFlowFactorSchedule: 1; Prompt: tiny cute swamp bunny, highly detailed, intricate, ultra hd, sharp photo, crepuscular rays, in focus, by tomasz alen kopera --neg + + 4 + 00:00:00,150 --> 00:00:00,200 + F#: 3; Cadence: false; Seed: 4; Angle: 0; Tr.C.X: 0.500; Tr.C.Y: 0.500; Zoom: 1.003; TrX: 0; TrY: 0; TrZ: 0; RotX: 0; RotY: 0; RotZ: 0; PerFlT: 0; PerFlP: 0; PerFlG: 0; PerFlFV: 53; Noise: 0.040; StrSch: 0.650; CtrstSch: 1; CFGSch: 7; P2PCfgSch: 1.500; SubSSch: 1; SubSStrSch: 0; CkptSch: model1.ckpt; StepsSch: 25; SeedSch: 1; SamplerSchedule: Euler a; ClipskipSchedule: 2; NoiseMultiplierSchedule: 1.050; MaskSchedule: {video_mask}; NoiseMaskSchedule: {video_mask}; AmountSchedule: 0.050; KernelSchedule: 5; SigmaSchedule: 1; ThresholdSchedule: 0; AspectRatioSchedule: 1; FieldOfViewSchedule: 70; NearSchedule: 200; CadenceFlowFactorSchedule: 1; RedoFlowFactorSchedule: 1; FarSchedule: 10000; HybridCompAlphaSchedule: 0.500; HybridCompMaskBlendAlphaSchedule: 0.500; HybridCompMaskContrastSchedule: 1; HybridCompMaskAutoContrastCutoffHighSchedule: 100; HybridCompMaskAutoContrastCutoffLowSchedule: 0; HybridFlowFactorSchedule: 1; Prompt: tiny cute swamp bunny, highly detailed, intricate, ultra hd, sharp photo, crepuscular rays, in focus, by tomasz alen kopera --neg + + 5 + 00:00:00,200 --> 00:00:00,250 + F#: 4; Cadence: false; Seed: 5; Angle: 0; Tr.C.X: 0.500; Tr.C.Y: 0.500; Zoom: 1.003; TrX: 0; TrY: 0; TrZ: 0; RotX: 0; RotY: 0; RotZ: 0; PerFlT: 0; PerFlP: 0; PerFlG: 0; PerFlFV: 53; Noise: 0.040; StrSch: 0.650; CtrstSch: 1; CFGSch: 7; P2PCfgSch: 1.500; SubSSch: 1; SubSStrSch: 0; CkptSch: model1.ckpt; StepsSch: 25; SeedSch: 1; SamplerSchedule: Euler a; ClipskipSchedule: 2; NoiseMultiplierSchedule: 1.050; MaskSchedule: {video_mask}; NoiseMaskSchedule: {video_mask}; AmountSchedule: 0.050; KernelSchedule: 5; SigmaSchedule: 1; ThresholdSchedule: 0; AspectRatioSchedule: 1; FieldOfViewSchedule: 70; NearSchedule: 200; CadenceFlowFactorSchedule: 1; RedoFlowFactorSchedule: 1; FarSchedule: 10000; HybridCompAlphaSchedule: 0.500; HybridCompMaskBlendAlphaSchedule: 0.500; HybridCompMaskContrastSchedule: 1; HybridCompMaskAutoContrastCutoffHighSchedule: 100; HybridCompMaskAutoContrastCutoffLowSchedule: 0; HybridFlowFactorSchedule: 1; Prompt: tiny cute swamp bunny, highly detailed, intricate, ultra hd, sharp photo, crepuscular rays, in focus, by tomasz alen kopera --neg + + + ''' +# --- +# name: test_with_parseq_inline + ''' + 1 + 00:00:00,000 --> 00:00:00,050 + F#: 0; Cadence: false; Seed: 1; Angle: 0; Tr.C.X: 0.500; Tr.C.Y: 0.500; Zoom: 1.002; 
TrX: 0; TrY: 0; TrZ: 0; RotX: 0; RotY: 0; RotZ: 0; PerFlT: 0; PerFlP: 0; PerFlG: 0; PerFlFV: 53; Noise: 0.040; StrSch: 0.650; CtrstSch: 1; CFGSch: 7; P2PCfgSch: 1.500; SubSSch: 55; SubSStrSch: 0; CkptSch: model1.ckpt; StepsSch: 25; SeedSch: 55; SamplerSchedule: Euler a; ClipskipSchedule: 2; NoiseMultiplierSchedule: 1.050; MaskSchedule: {video_mask}; NoiseMaskSchedule: {video_mask}; AmountSchedule: 0.050; KernelSchedule: 5; SigmaSchedule: 1; ThresholdSchedule: 0; AspectRatioSchedule: 1; FieldOfViewSchedule: 70; NearSchedule: 200; CadenceFlowFactorSchedule: 1; RedoFlowFactorSchedule: 1; FarSchedule: 10000; HybridCompAlphaSchedule: 0.500; HybridCompMaskBlendAlphaSchedule: 0.500; HybridCompMaskContrastSchedule: 1; HybridCompMaskAutoContrastCutoffHighSchedule: 100; HybridCompMaskAutoContrastCutoffLowSchedule: 0; HybridFlowFactorSchedule: 1; Prompt: Parseq prompt! --neg neg parseq prompt! + + 2 + 00:00:00,050 --> 00:00:00,100 + F#: 1; Cadence: false; Seed: 56; Angle: 30.111; Tr.C.X: 0.500; Tr.C.Y: 0.500; Zoom: 1.003; TrX: 0; TrY: 0; TrZ: 0; RotX: 0; RotY: 0; RotZ: 0; PerFlT: 0; PerFlP: 0; PerFlG: 0; PerFlFV: 53; Noise: 0.040; StrSch: 0.650; CtrstSch: 1; CFGSch: 7; P2PCfgSch: 1.500; SubSSch: 56; SubSStrSch: 0.100; CkptSch: model1.ckpt; StepsSch: 25; SeedSch: 55.100; SamplerSchedule: Euler a; ClipskipSchedule: 2; NoiseMultiplierSchedule: 1.050; MaskSchedule: {video_mask}; NoiseMaskSchedule: {video_mask}; AmountSchedule: 0.050; KernelSchedule: 5; SigmaSchedule: 1; ThresholdSchedule: 0; AspectRatioSchedule: 1; FieldOfViewSchedule: 70; NearSchedule: 200; CadenceFlowFactorSchedule: 1; RedoFlowFactorSchedule: 1; FarSchedule: 10000; HybridCompAlphaSchedule: 0.500; HybridCompMaskBlendAlphaSchedule: 0.500; HybridCompMaskContrastSchedule: 1; HybridCompMaskAutoContrastCutoffHighSchedule: 100; HybridCompMaskAutoContrastCutoffLowSchedule: 0; HybridFlowFactorSchedule: 1; Prompt: Parseq prompt! --neg neg parseq prompt! + + 3 + 00:00:00,100 --> 00:00:00,150 + F#: 2; Cadence: false; Seed: 56; Angle: 14.643; Tr.C.X: 0.500; Tr.C.Y: 0.500; Zoom: 1.003; TrX: 0; TrY: 0; TrZ: 0; RotX: 0; RotY: 0; RotZ: 0; PerFlT: 0; PerFlP: 0; PerFlG: 0; PerFlFV: 53; Noise: 0.040; StrSch: 0.650; CtrstSch: 1; CFGSch: 7; P2PCfgSch: 1.500; SubSSch: 56; SubSStrSch: 0.200; CkptSch: model1.ckpt; StepsSch: 25; SeedSch: 55.200; SamplerSchedule: Euler a; ClipskipSchedule: 2; NoiseMultiplierSchedule: 1.050; MaskSchedule: {video_mask}; NoiseMaskSchedule: {video_mask}; AmountSchedule: 0.050; KernelSchedule: 5; SigmaSchedule: 1; ThresholdSchedule: 0; AspectRatioSchedule: 1; FieldOfViewSchedule: 70; NearSchedule: 200; CadenceFlowFactorSchedule: 1; RedoFlowFactorSchedule: 1; FarSchedule: 10000; HybridCompAlphaSchedule: 0.500; HybridCompMaskBlendAlphaSchedule: 0.500; HybridCompMaskContrastSchedule: 1; HybridCompMaskAutoContrastCutoffHighSchedule: 100; HybridCompMaskAutoContrastCutoffLowSchedule: 0; HybridFlowFactorSchedule: 1; Prompt: Parseq prompt! --neg neg parseq prompt! 
+ + 4 + 00:00:00,150 --> 00:00:00,200 + F#: 3; Cadence: false; Seed: 56; Angle: -8.348; Tr.C.X: 0.500; Tr.C.Y: 0.500; Zoom: 1.003; TrX: 0; TrY: 0; TrZ: 0; RotX: 0; RotY: 0; RotZ: 0; PerFlT: 0; PerFlP: 0; PerFlG: 0; PerFlFV: 53; Noise: 0.040; StrSch: 0.650; CtrstSch: 1; CFGSch: 7; P2PCfgSch: 1.500; SubSSch: 56; SubSStrSch: 0.300; CkptSch: model1.ckpt; StepsSch: 25; SeedSch: 55.300; SamplerSchedule: Euler a; ClipskipSchedule: 2; NoiseMultiplierSchedule: 1.050; MaskSchedule: {video_mask}; NoiseMaskSchedule: {video_mask}; AmountSchedule: 0.050; KernelSchedule: 5; SigmaSchedule: 1; ThresholdSchedule: 0; AspectRatioSchedule: 1; FieldOfViewSchedule: 70; NearSchedule: 200; CadenceFlowFactorSchedule: 1; RedoFlowFactorSchedule: 1; FarSchedule: 10000; HybridCompAlphaSchedule: 0.500; HybridCompMaskBlendAlphaSchedule: 0.500; HybridCompMaskContrastSchedule: 1; HybridCompMaskAutoContrastCutoffHighSchedule: 100; HybridCompMaskAutoContrastCutoffLowSchedule: 0; HybridFlowFactorSchedule: 1; Prompt: Parseq prompt! --neg neg parseq prompt! + + 5 + 00:00:00,200 --> 00:00:00,250 + F#: 4; Cadence: false; Seed: 56; Angle: -27.050; Tr.C.X: 0.500; Tr.C.Y: 0.500; Zoom: 1.003; TrX: 0; TrY: 0; TrZ: 0; RotX: 0; RotY: 0; RotZ: 0; PerFlT: 0; PerFlP: 0; PerFlG: 0; PerFlFV: 53; Noise: 0.040; StrSch: 0.650; CtrstSch: 1; CFGSch: 7; P2PCfgSch: 1.500; SubSSch: 56; SubSStrSch: 0.400; CkptSch: model1.ckpt; StepsSch: 25; SeedSch: 55.400; SamplerSchedule: Euler a; ClipskipSchedule: 2; NoiseMultiplierSchedule: 1.050; MaskSchedule: {video_mask}; NoiseMaskSchedule: {video_mask}; AmountSchedule: 0.050; KernelSchedule: 5; SigmaSchedule: 1; ThresholdSchedule: 0; AspectRatioSchedule: 1; FieldOfViewSchedule: 70; NearSchedule: 200; CadenceFlowFactorSchedule: 1; RedoFlowFactorSchedule: 1; FarSchedule: 10000; HybridCompAlphaSchedule: 0.500; HybridCompMaskBlendAlphaSchedule: 0.500; HybridCompMaskContrastSchedule: 1; HybridCompMaskAutoContrastCutoffHighSchedule: 100; HybridCompMaskAutoContrastCutoffLowSchedule: 0; HybridFlowFactorSchedule: 1; Prompt: Parseq prompt! --neg neg parseq prompt! + + + ''' +# --- diff --git a/extensions-builtin/sd-webui-deforum/tests/conftest.py b/extensions-builtin/sd-webui-deforum/tests/conftest.py new file mode 100644 index 0000000000000000000000000000000000000000..d5cf84b69d24ae059c935552e1a108caaf73c3a1 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/tests/conftest.py @@ -0,0 +1,76 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
+ +# Contact the authors: https://deforum.github.io/ + +import pytest +import subprocess +import sys +import os +from subprocess import Popen, PIPE, STDOUT +from pathlib import Path +from tenacity import retry, stop_after_delay, wait_fixed +import threading +import requests + +def pytest_addoption(parser): + parser.addoption("--start-server", action="store_true", help="start the server before the test run (if not specified, you must start the server manually)") + +@pytest.fixture +def cmdopt(request): + return request.config.getoption("--start-server") + +@retry(wait=wait_fixed(5), stop=stop_after_delay(60)) +def wait_for_service(url): + response = requests.get(url, timeout=(5, 5)) + print(f"Waiting for server to respond 200 at {url} (response: {response.status_code})...") + assert response.status_code == 200 + +@pytest.fixture(scope="session", autouse=True) +def start_server(request): + if request.config.getoption("--start-server"): + + # Kick off server subprocess + script_directory = os.path.dirname(__file__) + a1111_directory = Path(script_directory).parent.parent.parent # sd-webui/extensions/deforum/tests/ -> sd-webui + print(f"Starting server in {a1111_directory}...") + proc = Popen(["python", "-m", "coverage", "run", "--data-file=.coverage.server", "launch.py", + "--skip-prepare-environment", "--skip-torch-cuda-test", "--test-server", "--no-half", + "--disable-opt-split-attention", "--use-cpu", "all", "--add-stop-route", "--api", "--deforum-api", "--listen"], + cwd=a1111_directory, + stdout=PIPE, + stderr=STDOUT, + universal_newlines=True) + + # ensure server is killed at the end of the test run + request.addfinalizer(proc.kill) + + # Spin up separate thread to capture the server output to file and stdout + def server_console_manager(): + with proc.stdout, open('serverlog.txt', 'ab') as logfile: + for line in proc.stdout: + sys.stdout.write(f"[SERVER LOG] {line}") + sys.stdout.flush() + logfile.write(line.encode('utf-8')) + logfile.flush() + proc.wait() + + threading.Thread(target=server_console_manager).start() + + # Wait for deforum API to respond + wait_for_service('http://localhost:7860/deforum_api/jobs/') + + else: + print("Checking server is already running / waiting for it to come up...") + wait_for_service('http://localhost:7860/deforum_api/jobs/') \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/tests/deforum_postprocess_test.py b/extensions-builtin/sd-webui-deforum/tests/deforum_postprocess_test.py new file mode 100644 index 0000000000000000000000000000000000000000..65e41cde0fb94934030221c6bb94aafde7020f6e --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/tests/deforum_postprocess_test.py @@ -0,0 +1,181 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
+ +# Contact the authors: https://deforum.github.io/ + +import glob +import json +import os + +import pytest +import requests +from moviepy.editor import VideoFileClip +from utils import API_BASE_URL, gpu_disabled, wait_for_job_to_complete + +from scripts.deforum_api_models import (DeforumJobPhase, + DeforumJobStatusCategory) +from scripts.deforum_helpers.subtitle_handler import get_user_values + +@pytest.mark.skipif(gpu_disabled(), reason="requires GPU-enabled server") +def test_post_process_FILM(snapshot): + with open('tests/testdata/simple.input_settings.txt', 'r') as settings_file: + deforum_settings = json.load(settings_file) + + deforum_settings["frame_interpolation_engine"] = "FILM" + deforum_settings["frame_interpolation_x_amount"] = 3 + deforum_settings["frame_interpolation_slow_mo_enabled"] = False + + response = requests.post(API_BASE_URL+"/batches", json={ + "deforum_settings":[deforum_settings], + "options_overrides": { + "deforum_save_gen_info_as_srt": True, + "deforum_save_gen_info_as_srt_params": get_user_values(), + } + }) + response.raise_for_status() + job_id = response.json()["job_ids"][0] + jobStatus = wait_for_job_to_complete(job_id) + + assert jobStatus.status == DeforumJobStatusCategory.SUCCEEDED, f"Job {job_id} failed: {jobStatus}" + + # Ensure parameters used at each frame have not regressed + srt_filenname = os.path.join(jobStatus.outdir, f"{jobStatus.timestring}.srt") + with open(srt_filenname, 'r') as srt_file: + assert srt_file.read() == snapshot + + # Ensure interpolated video format is as expected + video_filenames = glob.glob(f'{jobStatus.outdir}/*FILM*.mp4', recursive=True) + assert len(video_filenames) == 1, "Expected one FILM video to be generated" + + interpolated_video_filename = video_filenames[0] + clip = VideoFileClip(interpolated_video_filename) + assert clip.fps == deforum_settings['fps'] * deforum_settings["frame_interpolation_x_amount"] , "Video FPS does not match input settings (fps * interpolation amount)" + assert clip.duration * clip.fps == deforum_settings['max_frames'] * deforum_settings["frame_interpolation_x_amount"], "Video frame count does not match input settings (including interpolation)" + assert clip.size == [deforum_settings['W'], deforum_settings['H']] , "Video dimensions are not as expected" + +@pytest.mark.skipif(gpu_disabled(), reason="requires GPU-enabled server") +def test_post_process_RIFE(snapshot): + with open('tests/testdata/simple.input_settings.txt', 'r') as settings_file: + deforum_settings = json.load(settings_file) + + deforum_settings["frame_interpolation_engine"] = "RIFE v4.6" + deforum_settings["frame_interpolation_x_amount"] = 3 + deforum_settings["frame_interpolation_slow_mo_enabled"] = False + + response = requests.post(API_BASE_URL+"/batches", json={ + "deforum_settings":[deforum_settings], + "options_overrides": { + "deforum_save_gen_info_as_srt": True, + "deforum_save_gen_info_as_srt_params": get_user_values(), + } + }) + response.raise_for_status() + job_id = response.json()["job_ids"][0] + jobStatus = wait_for_job_to_complete(job_id) + + assert jobStatus.status == DeforumJobStatusCategory.SUCCEEDED, f"Job {job_id} failed: {jobStatus}" + + # Ensure parameters used at each frame have not regressed + srt_filenname = os.path.join(jobStatus.outdir, f"{jobStatus.timestring}.srt") + with open(srt_filenname, 'r') as srt_file: + assert srt_file.read() == snapshot + + # Ensure interpolated video format is as expected + video_filenames = glob.glob(f'{jobStatus.outdir}/*RIFE*.mp4', recursive=True) + assert 
len(video_filenames) == 1, "Expected one RIFE video to be generated" + + interpolated_video_filename = video_filenames[0] + clip = VideoFileClip(interpolated_video_filename) + assert clip.fps == deforum_settings['fps'] * deforum_settings["frame_interpolation_x_amount"] , "Video FPS does not match input settings (fps * interpolation amount)" + assert clip.duration * clip.fps == deforum_settings['max_frames'] * deforum_settings["frame_interpolation_x_amount"], "Video frame count does not match input settings (including interpolation)" + assert clip.size == [deforum_settings['W'], deforum_settings['H']] , "Video dimensions are not as expected" + +@pytest.mark.skipif(gpu_disabled(), reason="requires GPU-enabled server") +def test_post_process_UPSCALE(snapshot): + with open('tests/testdata/simple.input_settings.txt', 'r') as settings_file: + deforum_settings = json.load(settings_file) + + deforum_settings["r_upscale_video"] = True + deforum_settings["r_upscale_factor"] = "x4" + deforum_settings["r_upscale_model"] = "realesrgan-x4plus" + + response = requests.post(API_BASE_URL+"/batches", json={ + "deforum_settings":[deforum_settings], + "options_overrides": { + "deforum_save_gen_info_as_srt": True, + "deforum_save_gen_info_as_srt_params": get_user_values(), + } + }) + response.raise_for_status() + job_id = response.json()["job_ids"][0] + jobStatus = wait_for_job_to_complete(job_id) + + assert jobStatus.status == DeforumJobStatusCategory.SUCCEEDED, f"Job {job_id} failed: {jobStatus}" + + # Ensure parameters used at each frame have not regressed + srt_filenname = os.path.join(jobStatus.outdir, f"{jobStatus.timestring}.srt") + with open(srt_filenname, 'r') as srt_file: + assert srt_file.read() == snapshot + + # Ensure interpolated video format is as expected + video_filenames = glob.glob(f'{jobStatus.outdir}/*Upscaled*.mp4', recursive=True) + assert len(video_filenames) == 1, "Expected one upscaled video to be generated" + + interpolated_video_filename = video_filenames[0] + clip = VideoFileClip(interpolated_video_filename) + assert clip.fps == deforum_settings['fps'] , "Video FPS does not match input settings" + assert clip.duration * clip.fps == deforum_settings['max_frames'], "Video frame count does not match input settings" + assert clip.size == [deforum_settings['W']*4, deforum_settings['H']*4] , "Video dimensions are not as expected (including upscaling)" + + +@pytest.mark.skipif(gpu_disabled(), reason="requires GPU-enabled server") +def test_post_process_UPSCALE_FILM(snapshot): + with open('tests/testdata/simple.input_settings.txt', 'r') as settings_file: + deforum_settings = json.load(settings_file) + + deforum_settings["r_upscale_video"] = True + deforum_settings["r_upscale_factor"] = "x4" + deforum_settings["r_upscale_model"] = "realesrgan-x4plus" + deforum_settings["frame_interpolation_engine"] = "FILM" + deforum_settings["frame_interpolation_x_amount"] = 3 + deforum_settings["frame_interpolation_slow_mo_enabled"] = False + deforum_settings["frame_interpolation_use_upscaled"] = True + + response = requests.post(API_BASE_URL+"/batches", json={ + "deforum_settings":[deforum_settings], + "options_overrides": { + "deforum_save_gen_info_as_srt": True, + "deforum_save_gen_info_as_srt_params": get_user_values(), + } + }) + response.raise_for_status() + job_id = response.json()["job_ids"][0] + jobStatus = wait_for_job_to_complete(job_id) + + assert jobStatus.status == DeforumJobStatusCategory.SUCCEEDED, f"Job {job_id} failed: {jobStatus}" + + # Ensure parameters used at each frame have not 
regressed + srt_filenname = os.path.join(jobStatus.outdir, f"{jobStatus.timestring}.srt") + with open(srt_filenname, 'r') as srt_file: + assert srt_file.read() == snapshot + + # Ensure interpolated video format is as expected + video_filenames = glob.glob(f'{jobStatus.outdir}/*FILM*upscaled*.mp4', recursive=True) + assert len(video_filenames) == 1, "Expected one upscaled video to be generated" + + interpolated_video_filename = video_filenames[0] + clip = VideoFileClip(interpolated_video_filename) + assert clip.fps == deforum_settings['fps'] * deforum_settings["frame_interpolation_x_amount"] , "Video FPS does not match input settings (fps * interpolation amount)" + assert clip.duration * clip.fps == deforum_settings['max_frames'] * deforum_settings["frame_interpolation_x_amount"], "Video frame count does not match input settings (including interpolation)" + assert clip.size == [deforum_settings['W']*4, deforum_settings['H']*4] , "Video dimensions are not as expected (including upscaling)" diff --git a/extensions-builtin/sd-webui-deforum/tests/deforum_test.py b/extensions-builtin/sd-webui-deforum/tests/deforum_test.py new file mode 100644 index 0000000000000000000000000000000000000000..43e8c68d586e12a84b28a1dd84b82f54b242e0b7 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/tests/deforum_test.py @@ -0,0 +1,182 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
+ +# Contact the authors: https://deforum.github.io/ + +import os +import json +from scripts.deforum_api_models import DeforumJobStatus, DeforumJobStatusCategory, DeforumJobPhase +import requests +from moviepy.editor import VideoFileClip +import glob +from pathlib import Path +from utils import wait_for_job_to_complete, wait_for_job_to_enter_phase, wait_for_job_to_enter_status, API_BASE_URL + +from scripts.deforum_helpers.subtitle_handler import get_user_values + +def test_simple_settings(snapshot): + with open('tests/testdata/simple.input_settings.txt', 'r') as settings_file: + deforum_settings = json.load(settings_file) + + response = requests.post(API_BASE_URL+"/batches", json={ + "deforum_settings":[deforum_settings], + "options_overrides": { + "deforum_save_gen_info_as_srt": True, + "deforum_save_gen_info_as_srt_params": get_user_values(), + } + }) + response.raise_for_status() + job_id = response.json()["job_ids"][0] + jobStatus = wait_for_job_to_complete(job_id) + + assert jobStatus.status == DeforumJobStatusCategory.SUCCEEDED, f"Job {job_id} failed: {jobStatus}" + + # Ensure parameters used at each frame have not regressed + srt_filenname = os.path.join(jobStatus.outdir, f"{jobStatus.timestring}.srt") + with open(srt_filenname, 'r') as srt_file: + assert srt_file.read() == snapshot + + # Ensure video format is as expected + video_filename = os.path.join(jobStatus.outdir, f"{jobStatus.timestring}.mp4") + clip = VideoFileClip(video_filename) + assert clip.fps == deforum_settings['fps'] , "Video FPS does not match input settings" + assert clip.duration * clip.fps == deforum_settings['max_frames'] , "Video frame count does not match input settings" + assert clip.size == [deforum_settings['W'], deforum_settings['H']] , "Video dimensions are not as expected" + + +def test_api_cancel_active_job(): + with open('tests/testdata/simple.input_settings.txt', 'r') as settings_file: + data = json.load(settings_file) + response = requests.post(API_BASE_URL+"/batches", json={"deforum_settings":[data]}) + response.raise_for_status() + job_id = response.json()["job_ids"][0] + wait_for_job_to_enter_phase(job_id, DeforumJobPhase.GENERATING) + + cancel_url = API_BASE_URL+"/jobs/"+job_id + response = requests.delete(cancel_url) + response.raise_for_status() + assert response.status_code == 200, f"DELETE request to {cancel_url} failed: {response.status_code}" + + jobStatus = wait_for_job_to_complete(job_id) + + assert jobStatus.status == DeforumJobStatusCategory.CANCELLED, f"Job {job_id} did not cancel: {jobStatus}" + + +def test_3d_mode(snapshot): + with open('tests/testdata/simple.input_settings.txt', 'r') as settings_file: + deforum_settings = json.load(settings_file) + + deforum_settings['animation_mode'] = "3D" + + response = requests.post(API_BASE_URL+"/batches", json={ + "deforum_settings":[deforum_settings], + "options_overrides": { + "deforum_save_gen_info_as_srt": True, + "deforum_save_gen_info_as_srt_params": get_user_values(), + } + }) + response.raise_for_status() + job_id = response.json()["job_ids"][0] + jobStatus = wait_for_job_to_complete(job_id) + + assert jobStatus.status == DeforumJobStatusCategory.SUCCEEDED, f"Job {job_id} failed: {jobStatus}" + + # Ensure parameters used at each frame have not regressed + srt_filenname = os.path.join(jobStatus.outdir, f"{jobStatus.timestring}.srt") + with open(srt_filenname, 'r') as srt_file: + assert srt_file.read() == snapshot + + # Ensure video format is as expected + video_filename = os.path.join(jobStatus.outdir, f"{jobStatus.timestring}.mp4") + 
clip = VideoFileClip(video_filename) + assert clip.fps == deforum_settings['fps'] , "Video FPS does not match input settings" + assert clip.duration * clip.fps == deforum_settings['max_frames'] , "Video frame count does not match input settings" + assert clip.size == [deforum_settings['W'], deforum_settings['H']] , "Video dimensions are not as expected" + + +def test_with_parseq_inline(snapshot): + with open('tests/testdata/simple.input_settings.txt', 'r') as settings_file: + deforum_settings = json.load(settings_file) + + with open('tests/testdata/parseq.json', 'r') as parseq_file: + parseq_data = json.load(parseq_file) + + deforum_settings['parseq_manifest'] = json.dumps(parseq_data) + + response = requests.post(API_BASE_URL+"/batches", json={ + "deforum_settings":[deforum_settings], + "options_overrides": { + "deforum_save_gen_info_as_srt": True, + "deforum_save_gen_info_as_srt_params": get_user_values(), + } + }) + response.raise_for_status() + job_id = response.json()["job_ids"][0] + jobStatus = wait_for_job_to_complete(job_id) + + assert jobStatus.status == DeforumJobStatusCategory.SUCCEEDED, f"Job {job_id} failed: {jobStatus}" + + # Ensure parameters used at each frame have not regressed + srt_filenname = os.path.join(jobStatus.outdir, f"{jobStatus.timestring}.srt") + with open(srt_filenname, 'r') as srt_file: + assert srt_file.read() == snapshot + + # Ensure video format is as expected + video_filename = os.path.join(jobStatus.outdir, f"{jobStatus.timestring}.mp4") + clip = VideoFileClip(video_filename) + assert clip.fps == deforum_settings['fps'] , "Video FPS does not match input settings" + assert clip.duration * clip.fps == deforum_settings['max_frames'] , "Video frame count does not match input settings" + assert clip.size == [deforum_settings['W'], deforum_settings['H']] , "Video dimensions are not as expected" + + +# def test_with_parseq_url(): + +def test_with_hybrid_video(snapshot): + with open('tests/testdata/simple.input_settings.txt', 'r') as settings_file: + deforum_settings = json.load(settings_file) + + with open('tests/testdata/parseq.json', 'r') as parseq_file: + parseq_data = json.load(parseq_file) + + init_video_local_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "testdata", "example_init_vid.mp4") + deforum_settings['video_init_path'] = init_video_local_path + deforum_settings['extract_nth_frame'] = 200 # input video is 900 frames, so we should keep 5 frames + deforum_settings["hybrid_generate_inputframes"] = True + deforum_settings["hybrid_composite"] = "Normal" + + response = requests.post(API_BASE_URL+"/batches", json={ + "deforum_settings":[deforum_settings], + "options_overrides": { + "deforum_save_gen_info_as_srt": True, + "deforum_save_gen_info_as_srt_params": get_user_values(), + } + }) + response.raise_for_status() + job_id = response.json()["job_ids"][0] + jobStatus = wait_for_job_to_complete(job_id) + + assert jobStatus.status == DeforumJobStatusCategory.SUCCEEDED, f"Job {job_id} failed: {jobStatus}" + + # Ensure parameters used at each frame have not regressed + srt_filenname = os.path.join(jobStatus.outdir, f"{jobStatus.timestring}.srt") + with open(srt_filenname, 'r') as srt_file: + assert srt_file.read() == snapshot + + # Ensure video format is as expected + video_filename = os.path.join(jobStatus.outdir, f"{jobStatus.timestring}.mp4") + clip = VideoFileClip(video_filename) + assert clip.fps == deforum_settings['fps'] , "Video FPS does not match input settings" + assert clip.duration == 5 / deforum_settings['fps'], "Video frame 
count does not match input settings" + assert clip.size == [deforum_settings['W'], deforum_settings['H']] , "Video dimensions are not as expected" + diff --git a/extensions-builtin/sd-webui-deforum/tests/testdata/example_init_vid.mp4 b/extensions-builtin/sd-webui-deforum/tests/testdata/example_init_vid.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..9bce44c33e275d6107240a1101032a7835fd8eed --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/tests/testdata/example_init_vid.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:71944d7430c461f0cd6e7fd10cee7eb72786352a3678fc7bc0ae3d410f72aece +size 1570024 diff --git a/extensions-builtin/sd-webui-deforum/tests/testdata/parseq.json b/extensions-builtin/sd-webui-deforum/tests/testdata/parseq.json new file mode 100644 index 0000000000000000000000000000000000000000..61d650a3c318707907f1c59c5e91d7a7510d15df --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/tests/testdata/parseq.json @@ -0,0 +1,231 @@ +{ + "meta": { + "generated_by": "sd_parseq", + "version": "0.1.94", + "generated_at": "Tue, 01 Aug 2023 03:02:03 GMT", + "doc_id": "doc-9d687a32-6bb4-41e3-a974-e8c5a6a18571", + "version_id": "version-9762c158-b3e0-471e-ad6b-97259c8a5141" + }, + "prompts": { + "format": "v2", + "enabled": true, + "commonPrompt": { + "name": "Common", + "positive": "", + "negative": "", + "allFrames": true, + "from": 0, + "to": 119, + "overlap": { + "inFrames": 0, + "outFrames": 0, + "type": "none", + "custom": "prompt_weight_1" + } + }, + "commonPromptPos": "append", + "promptList": [ + { + "name": "Prompt 1", + "positive": "Parseq prompt!", + "negative": "neg parseq prompt!", + "allFrames": false, + "from": 0, + "to": 119, + "overlap": { + "inFrames": 0, + "outFrames": 0, + "type": "none", + "custom": "prompt_weight_1" + } + } + ] + }, + "options": { + "input_fps": 20, + "bpm": 140, + "output_fps": 20, + "cc_window_width": 0, + "cc_window_slide_rate": 1, + "cc_use_input": false + }, + "managedFields": [ + "seed", + "angle" + ], + "displayedFields": [ + "seed", + "angle" + ], + "keyframes": [ + { + "frame": 0, + "zoom": 1, + "zoom_i": "C", + "seed": 55, + "noise": 0.04, + "strength": 0.6, + "prompt_weight_1": 1, + "prompt_weight_1_i": "bez(0,0.6,1,0.4)", + "prompt_weight_2": 0, + "prompt_weight_2_i": "bez(0,0.6,1,0.4)", + "angle": "", + "angle_i": "sin(p=1b, a=45)" + }, + { + "frame": 10, + "prompt_weight_1": 0, + "prompt_weight_2": 1, + "seed": 56 + } + ], + "timeSeries": [], + "keyframeLock": "frames", + "reverseRender": false, + "rendered_frames": [ + { + "frame": 0, + "seed": 55, + "angle": 0, + "deforum_prompt": "Parseq prompt! --neg neg parseq prompt!", + "subseed": 55, + "subseed_strength": 0, + "seed_delta": 55, + "seed_pc": 98.21428571428571, + "angle_delta": 0, + "angle_pc": 0 + }, + { + "frame": 1, + "seed": 55.1, + "angle": 30.11087728614862, + "deforum_prompt": "Parseq prompt! --neg neg parseq prompt!", + "subseed": 56, + "subseed_strength": 0.10000000000000142, + "seed_delta": 0.10000000000000142, + "seed_pc": 98.39285714285715, + "angle_delta": 30.11087728614862, + "angle_pc": 67.28163648031882 + }, + { + "frame": 2, + "seed": 55.2, + "angle": 44.7534852915723, + "deforum_prompt": "Parseq prompt! 
--neg neg parseq prompt!", + "subseed": 56, + "subseed_strength": 0.20000000000000284, + "seed_delta": 0.10000000000000142, + "seed_pc": 98.57142857142858, + "angle_delta": 14.642608005423675, + "angle_pc": 100 + }, + { + "frame": 3, + "seed": 55.3, + "angle": 36.405764746872634, + "deforum_prompt": "Parseq prompt! --neg neg parseq prompt!", + "subseed": 56, + "subseed_strength": 0.29999999999999716, + "seed_delta": 0.09999999999999432, + "seed_pc": 98.75, + "angle_delta": -8.347720544699662, + "angle_pc": 81.34732861516005 + }, + { + "frame": 4, + "seed": 55.4, + "angle": 9.356026086799169, + "deforum_prompt": "Parseq prompt! --neg neg parseq prompt!", + "subseed": 56, + "subseed_strength": 0.3999999999999986, + "seed_delta": 0.10000000000000142, + "seed_pc": 98.92857142857142, + "angle_delta": -27.049738660073466, + "angle_pc": 20.905692653530693 + }, + { + "frame": 5, + "seed": 55.5, + "angle": -22.500000000000004, + "deforum_prompt": "Parseq prompt! --neg neg parseq prompt!", + "subseed": 56, + "subseed_strength": 0.5, + "seed_delta": 0.10000000000000142, + "seed_pc": 99.10714285714286, + "angle_delta": -31.856026086799172, + "angle_pc": -50.275413978175834 + }, + { + "frame": 6, + "seed": 55.6, + "angle": -42.79754323328191, + "deforum_prompt": "Parseq prompt! --neg neg parseq prompt!", + "subseed": 56, + "subseed_strength": 0.6000000000000014, + "seed_delta": 0.10000000000000142, + "seed_pc": 99.28571428571429, + "angle_delta": -20.297543233281903, + "angle_pc": -95.62952014676112 + }, + { + "frame": 7, + "seed": 55.7, + "angle": -41.109545593917034, + "deforum_prompt": "Parseq prompt! --neg neg parseq prompt!", + "subseed": 56, + "subseed_strength": 0.7000000000000028, + "seed_delta": 0.10000000000000142, + "seed_pc": 99.46428571428572, + "angle_delta": 1.6879976393648732, + "angle_pc": -91.85775214172767 + }, + { + "frame": 8, + "seed": 55.8, + "angle": -18.303148938411006, + "deforum_prompt": "Parseq prompt! --neg neg parseq prompt!", + "subseed": 56, + "subseed_strength": 0.7999999999999972, + "seed_delta": 0.09999999999999432, + "seed_pc": 99.64285714285714, + "angle_delta": 22.806396655506028, + "angle_pc": -40.89770622145878 + }, + { + "frame": 9, + "seed": 55.9, + "angle": 13.905764746872622, + "deforum_prompt": "Parseq prompt! --neg neg parseq prompt!", + "subseed": 56, + "subseed_strength": 0.8999999999999986, + "seed_delta": 0.10000000000000142, + "seed_pc": 99.82142857142857, + "angle_delta": 32.208913685283626, + "angle_pc": 31.0719146369842 + }, + { + "frame": 10, + "seed": 56, + "angle": 38.97114317029975, + "deforum_prompt": "Parseq prompt! 
--neg neg parseq prompt!", + "subseed": 56, + "subseed_strength": 0, + "seed_delta": 0.10000000000000142, + "seed_pc": 100, + "angle_delta": 25.065378423427127, + "angle_pc": 87.07957138175908 + } + ], + "rendered_frames_meta": { + "seed": { + "max": 56, + "min": 55, + "isFlat": false + }, + "angle": { + "max": 44.7534852915723, + "min": -42.79754323328191, + "isFlat": false + } + } +} \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/tests/testdata/simple.input_settings.txt b/extensions-builtin/sd-webui-deforum/tests/testdata/simple.input_settings.txt new file mode 100644 index 0000000000000000000000000000000000000000..28a10047bb10d2859cb6bf4df94d1b747491f3ad --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/tests/testdata/simple.input_settings.txt @@ -0,0 +1,255 @@ +{ + "W": 512, + "H": 512, + "show_info_on_ui": true, + "tiling": false, + "restore_faces": false, + "seed_resize_from_w": 0, + "seed_resize_from_h": 0, + "seed": 1, + "sampler": "Euler a", + "steps": 1, + "batch_name": "Deforum_{timestring}", + "seed_behavior": "iter", + "seed_iter_N": 1, + "use_init": false, + "strength": 0.8, + "strength_0_no_init": true, + "init_image": "None", + "use_mask": false, + "use_alpha_as_mask": false, + "mask_file": "", + "invert_mask": false, + "mask_contrast_adjust": 1.0, + "mask_brightness_adjust": 1.0, + "overlay_mask": true, + "mask_overlay_blur": 4, + "fill": 1, + "full_res_mask": true, + "full_res_mask_padding": 4, + "reroll_blank_frames": "ignore", + "reroll_patience": 10.0, + "prompts": { + "0": "tiny cute swamp bunny, highly detailed, intricate, ultra hd, sharp photo, crepuscular rays, in focus, by tomasz alen kopera", + "30": "anthropomorphic clean cat, surrounded by fractals, epic angle and pose, symmetrical, 3d, depth of field, ruan jia and fenghua zhong", + "60": "a beautiful coconut --neg photo, realistic", + "90": "a beautiful durian, trending on Artstation" + }, + "animation_prompts_positive": "", + "animation_prompts_negative": "", + "animation_mode": "2D", + "max_frames": 5, + "border": "replicate", + "angle": "0:(0)", + "zoom": "0:(1.0025+0.002*sin(1.25*3.14*t/30))", + "translation_x": "0:(0)", + "translation_y": "0:(0)", + "translation_z": "0:(0)", + "transform_center_x": "0:(0.5)", + "transform_center_y": "0:(0.5)", + "rotation_3d_x": "0:(0)", + "rotation_3d_y": "0:(0)", + "rotation_3d_z": "0:(0)", + "enable_perspective_flip": false, + "perspective_flip_theta": "0:(0)", + "perspective_flip_phi": "0:(0)", + "perspective_flip_gamma": "0:(0)", + "perspective_flip_fv": "0:(53)", + "noise_schedule": "0: (0.04)", + "strength_schedule": "0: (0.65)", + "contrast_schedule": "0: (1.0)", + "cfg_scale_schedule": "0: (7)", + "enable_steps_scheduling": false, + "steps_schedule": "0: (25)", + "fov_schedule": "0: (70)", + "aspect_ratio_schedule": "0: (1)", + "aspect_ratio_use_old_formula": false, + "near_schedule": "0: (200)", + "far_schedule": "0: (10000)", + "seed_schedule": "0:(s), 1:(-1), \"max_f-2\":(-1), \"max_f-1\":(s)", + "pix2pix_img_cfg_scale_schedule": "0:(1.5)", + "enable_subseed_scheduling": false, + "subseed_schedule": "0:(1)", + "subseed_strength_schedule": "0:(0)", + "enable_sampler_scheduling": false, + "sampler_schedule": "0: (\"Euler a\")", + "use_noise_mask": false, + "mask_schedule": "0: (\"{video_mask}\")", + "noise_mask_schedule": "0: (\"{video_mask}\")", + "enable_checkpoint_scheduling": false, + "checkpoint_schedule": "0: (\"model1.ckpt\"), 100: (\"model2.safetensors\")", + "enable_clipskip_scheduling": false, + "clipskip_schedule": 
"0: (2)", + "enable_noise_multiplier_scheduling": false, + "noise_multiplier_schedule": "0: (1.05)", + "resume_from_timestring": false, + "resume_timestring": "20230707221541", + "enable_ddim_eta_scheduling": false, + "ddim_eta_schedule": "0: (0)", + "enable_ancestral_eta_scheduling": false, + "ancestral_eta_schedule": "0: (1)", + "amount_schedule": "0: (0.05)", + "kernel_schedule": "0: (5)", + "sigma_schedule": "0: (1.0)", + "threshold_schedule": "0: (0.0)", + "color_coherence": "LAB", + "color_coherence_image_path": "", + "color_coherence_video_every_N_frames": 1, + "color_force_grayscale": false, + "legacy_colormatch": false, + "diffusion_cadence": 1, + "optical_flow_cadence": "None", + "cadence_flow_factor_schedule": "0: (1)", + "optical_flow_redo_generation": "None", + "redo_flow_factor_schedule": "0: (1)", + "diffusion_redo": 0, + "noise_type": "perlin", + "perlin_octaves": 4, + "perlin_persistence": 0.5, + "use_depth_warping": true, + "depth_algorithm": "Zoe", + "midas_weight": 0.4, + "padding_mode": "reflection", + "sampling_mode": "bicubic", + "save_depth_maps": false, + "video_init_path": "", + "extract_nth_frame": 1, + "extract_from_frame": 0, + "extract_to_frame": -1, + "overwrite_extracted_frames": false, + "use_mask_video": false, + "video_mask_path": "", + "hybrid_comp_alpha_schedule": "0:(0.5)", + "hybrid_comp_mask_blend_alpha_schedule": "0:(0.5)", + "hybrid_comp_mask_contrast_schedule": "0:(1)", + "hybrid_comp_mask_auto_contrast_cutoff_high_schedule": "0:(100)", + "hybrid_comp_mask_auto_contrast_cutoff_low_schedule": "0:(0)", + "hybrid_flow_factor_schedule": "0:(1)", + "hybrid_generate_inputframes": false, + "hybrid_generate_human_masks": "None", + "hybrid_use_first_frame_as_init_image": true, + "hybrid_motion": "None", + "hybrid_motion_use_prev_img": false, + "hybrid_flow_consistency": false, + "hybrid_consistency_blur": 2, + "hybrid_flow_method": "RAFT", + "hybrid_composite": "None", + "hybrid_use_init_image": false, + "hybrid_comp_mask_type": "None", + "hybrid_comp_mask_inverse": false, + "hybrid_comp_mask_equalize": "None", + "hybrid_comp_mask_auto_contrast": true, + "hybrid_comp_save_extra_frames": false, + "parseq_manifest": "", + "parseq_use_deltas": true, + "use_looper": false, + "init_images": "{\n \"0\": \"https://deforum.github.io/a1/Gi1.png\",\n \"max_f/4-5\": \"https://deforum.github.io/a1/Gi2.png\",\n \"max_f/2-10\": \"https://deforum.github.io/a1/Gi3.png\",\n \"3*max_f/4-15\": \"https://deforum.github.io/a1/Gi4.jpg\",\n \"max_f-20\": \"https://deforum.github.io/a1/Gi1.png\"\n}", + "image_strength_schedule": "0:(0.75)", + "blendFactorMax": "0:(0.35)", + "blendFactorSlope": "0:(0.25)", + "tweening_frames_schedule": "0:(20)", + "color_correction_factor": "0:(0.075)", + "cn_1_overwrite_frames": true, + "cn_1_vid_path": "", + "cn_1_mask_vid_path": "", + "cn_1_enabled": false, + "cn_1_low_vram": false, + "cn_1_pixel_perfect": false, + "cn_1_module": "none", + "cn_1_model": "control_v11f1p_sd15_depth [cfd03158]", + "cn_1_weight": "0:(1)", + "cn_1_guidance_start": "0:(0.0)", + "cn_1_guidance_end": "0:(1.0)", + "cn_1_processor_res": 64, + "cn_1_threshold_a": 64, + "cn_1_threshold_b": 64, + "cn_1_resize_mode": "Inner Fit (Scale to Fit)", + "cn_1_control_mode": "Balanced", + "cn_1_loopback_mode": false, + "cn_2_overwrite_frames": true, + "cn_2_vid_path": "", + "cn_2_mask_vid_path": "", + "cn_2_enabled": false, + "cn_2_low_vram": false, + "cn_2_pixel_perfect": false, + "cn_2_module": "none", + "cn_2_model": "control_v11p_sd15_seg [e1f51eb9]", + "cn_2_weight": "0:(1)", + 
"cn_2_guidance_start": "0:(0.0)", + "cn_2_guidance_end": "0:(1.0)", + "cn_2_processor_res": 64, + "cn_2_threshold_a": 64, + "cn_2_threshold_b": 64, + "cn_2_resize_mode": "Inner Fit (Scale to Fit)", + "cn_2_control_mode": "Balanced", + "cn_2_loopback_mode": false, + "cn_3_overwrite_frames": true, + "cn_3_vid_path": "", + "cn_3_mask_vid_path": "", + "cn_3_enabled": false, + "cn_3_low_vram": false, + "cn_3_pixel_perfect": false, + "cn_3_module": "none", + "cn_3_model": "None", + "cn_3_weight": "0:(1)", + "cn_3_guidance_start": "0:(0.0)", + "cn_3_guidance_end": "0:(1.0)", + "cn_3_processor_res": 64, + "cn_3_threshold_a": 64, + "cn_3_threshold_b": 64, + "cn_3_resize_mode": "Inner Fit (Scale to Fit)", + "cn_3_control_mode": "Balanced", + "cn_3_loopback_mode": false, + "cn_4_overwrite_frames": true, + "cn_4_vid_path": "", + "cn_4_mask_vid_path": "", + "cn_4_enabled": false, + "cn_4_low_vram": false, + "cn_4_pixel_perfect": false, + "cn_4_module": "none", + "cn_4_model": "None", + "cn_4_weight": "0:(1)", + "cn_4_guidance_start": "0:(0.0)", + "cn_4_guidance_end": "0:(1.0)", + "cn_4_processor_res": 64, + "cn_4_threshold_a": 64, + "cn_4_threshold_b": 64, + "cn_4_resize_mode": "Inner Fit (Scale to Fit)", + "cn_4_control_mode": "Balanced", + "cn_4_loopback_mode": false, + "cn_5_overwrite_frames": true, + "cn_5_vid_path": "", + "cn_5_mask_vid_path": "", + "cn_5_enabled": false, + "cn_5_low_vram": false, + "cn_5_pixel_perfect": false, + "cn_5_module": "none", + "cn_5_model": "None", + "cn_5_weight": "0:(1)", + "cn_5_guidance_start": "0:(0.0)", + "cn_5_guidance_end": "0:(1.0)", + "cn_5_processor_res": 64, + "cn_5_threshold_a": 64, + "cn_5_threshold_b": 64, + "cn_5_resize_mode": "Inner Fit (Scale to Fit)", + "cn_5_control_mode": "Balanced", + "cn_5_loopback_mode": false, + "skip_video_creation": false, + "fps": 20, + "make_gif": false, + "delete_imgs": false, + "delete_input_frames": false, + "add_soundtrack": "None", + "soundtrack_path": "", + "r_upscale_video": false, + "r_upscale_factor": "x4", + "r_upscale_model": "realesrgan-x4plus", + "r_upscale_keep_imgs": true, + "store_frames_in_ram": false, + "frame_interpolation_engine": "None", + "frame_interpolation_x_amount": 3, + "frame_interpolation_slow_mo_enabled": false, + "frame_interpolation_slow_mo_amount": 2, + "frame_interpolation_keep_imgs": false, + "frame_interpolation_use_upscaled": false +} \ No newline at end of file diff --git a/extensions-builtin/sd-webui-deforum/tests/utils.py b/extensions-builtin/sd-webui-deforum/tests/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..a11e1f83745098fedcb54f675077dc2c51521c51 --- /dev/null +++ b/extensions-builtin/sd-webui-deforum/tests/utils.py @@ -0,0 +1,71 @@ +# Copyright (C) 2023 Deforum LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, version 3 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
+
+# Contact the authors: https://deforum.github.io/
+
+from tenacity import retry, stop_after_delay, wait_fixed
+from pydantic_requests import PydanticSession
+import requests
+from scripts.deforum_api_models import DeforumJobStatus, DeforumJobStatusCategory, DeforumJobPhase
+
+SERVER_BASE_URL = "http://localhost:7860"
+API_ROOT = "/deforum_api"
+API_BASE_URL = SERVER_BASE_URL + API_ROOT
+
+@retry(wait=wait_fixed(2), stop=stop_after_delay(900))
+def wait_for_job_to_complete(id : str):
+    with PydanticSession(
+        {200: DeforumJobStatus}, headers={"accept": "application/json"}
+    ) as session:
+        response = session.get(API_BASE_URL+"/jobs/"+id)
+        response.raise_for_status()
+        jobStatus : DeforumJobStatus = response.model
+        print(f"Waiting for job {id}: status={jobStatus.status}; phase={jobStatus.phase}; execution_time:{jobStatus.execution_time}s")
+        assert jobStatus.status != DeforumJobStatusCategory.ACCEPTED
+        return jobStatus
+
+@retry(wait=wait_fixed(1), stop=stop_after_delay(120))
+def wait_for_job_to_enter_phase(id : str, phase : DeforumJobPhase):
+    with PydanticSession(
+        {200: DeforumJobStatus}, headers={"accept": "application/json"}
+    ) as session:
+        response = session.get(API_BASE_URL+"/jobs/"+id)
+        response.raise_for_status()
+        jobStatus : DeforumJobStatus = response.model
+        print(f"Waiting for job {id} to enter phase {phase}. Currently: status={jobStatus.status}; phase={jobStatus.phase}; execution_time:{jobStatus.execution_time}s")
+        assert jobStatus.phase == phase
+        return jobStatus
+
+@retry(wait=wait_fixed(1), stop=stop_after_delay(120))
+def wait_for_job_to_enter_status(id : str, status : DeforumJobStatusCategory):
+    with PydanticSession(
+        {200: DeforumJobStatus}, headers={"accept": "application/json"}
+    ) as session:
+        response = session.get(API_BASE_URL+"/jobs/"+id)
+        response.raise_for_status()
+        jobStatus : DeforumJobStatus = response.model
+        print(f"Waiting for job {id} to enter status {status}. Currently: status={jobStatus.status}; phase={jobStatus.phase}; execution_time:{jobStatus.execution_time}s")
+        assert jobStatus.status == status
+        return jobStatus
+
+
+def gpu_disabled():
+    response = requests.get(SERVER_BASE_URL+"/sdapi/v1/cmd-flags")
+    response.raise_for_status()
+    cmd_flags = response.json()
+    return cmd_flags["use_cpu"] == ["all"]
+
+
+