diff --git a/.gitattributes b/.gitattributes
index a6344aac8c09253b3b630fb776ae94478aa0275b..c9783383588d7ab83d7f78cee2c502273e99da1d 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -33,3 +33,8 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+.venv/bin/python filter=lfs diff=lfs merge=lfs -text
+.venv/bin/python3 filter=lfs diff=lfs merge=lfs -text
+.venv/bin/python3.12 filter=lfs diff=lfs merge=lfs -text
+.venv/bin/ruff filter=lfs diff=lfs merge=lfs -text
+demo_examples/img_2_original.png filter=lfs diff=lfs merge=lfs -text
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..1490075bcb0a8258ce1b291068aa7d5bda0bc6cc
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,41 @@
+# Python
+__pycache__/
+*.py[cod]
+*$py.class
+*.so
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+
+# Virtual environments
+venv/
+env/
+ENV/
+
+# IDE
+.vscode/
+.idea/
+*.swp
+*.swo
+
+# OS
+.DS_Store
+Thumbs.db
+
+# HuggingFace
+.cache/
+
+.env
diff --git a/.gradio/certificate.pem b/.gradio/certificate.pem
new file mode 100644
index 0000000000000000000000000000000000000000..b85c8037f6b60976b2546fdbae88312c5246d9a3
--- /dev/null
+++ b/.gradio/certificate.pem
@@ -0,0 +1,31 @@
+-----BEGIN CERTIFICATE-----
+MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
+TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
+cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
+WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
+ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
+MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
+h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
+0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
+A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
+T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
+B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
+B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
+KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
+OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
+jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
+qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
+rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
+HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
+hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
+ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
+3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
+NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
+ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
+TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
+jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
+oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
+4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
+mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
+emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
+-----END CERTIFICATE-----
diff --git a/.huggingface.yml b/.huggingface.yml
new file mode 100644
index 0000000000000000000000000000000000000000..50a38e2928131ee40bfcb74fffd632635d27f64e
--- /dev/null
+++ b/.huggingface.yml
@@ -0,0 +1,17 @@
+title: "PANDORA Object Removal"
+emoji: "🎨"
+colorFrom: blue
+colorTo: purple
+sdk: gradio
+sdk_version: "4.0.0"
+app_file: app.py
+pinned: false
+license: mit
+tags:
+  - object-removal
+  - inpainting
+  - diffusion
+  - stable-diffusion
+  - computer-vision
+  - image-processing
+
diff --git a/.venv/.gitignore b/.venv/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..f59ec20aabf5842d237244ece8c81ab184faeac1
--- /dev/null
+++ b/.venv/.gitignore
@@ -0,0 +1 @@
+*
\ No newline at end of file
diff --git a/.venv/CACHEDIR.TAG b/.venv/CACHEDIR.TAG
new file mode 100644
index 0000000000000000000000000000000000000000..bc1ecb967a482524e7736038de0df6e08f9ee452
--- /dev/null
+++ b/.venv/CACHEDIR.TAG
@@ -0,0 +1 @@
+Signature: 8a477f597d28d172789f06886806bc55
\ No newline at end of file
diff --git a/.venv/bin/accelerate b/.venv/bin/accelerate
new file mode 100644
index 0000000000000000000000000000000000000000..ec424bbdce3cebf534065087151330ead689047d
--- /dev/null
+++ b/.venv/bin/accelerate
@@ -0,0 +1,7 @@
+#!/raid/hvtham/nvloc/pandora-removal/.venv/bin/python
+import sys
+from accelerate.commands.accelerate_cli import main
+if __name__ == '__main__':
+    if sys.argv[0].endswith('.exe'):
+        sys.argv[0] = sys.argv[0][:-4]
+    sys.exit(main())
diff --git a/.venv/bin/accelerate-config b/.venv/bin/accelerate-config
new file mode 100644
index 0000000000000000000000000000000000000000..5770fd6728a1ff5cc93df0425b79deecad4c856f
--- /dev/null
+++ b/.venv/bin/accelerate-config
@@ -0,0 +1,7 @@
+#!/raid/hvtham/nvloc/pandora-removal/.venv/bin/python
+import sys
+from accelerate.commands.config import main
+if __name__ == '__main__':
+    if sys.argv[0].endswith('.exe'):
+        sys.argv[0] = sys.argv[0][:-4]
+    sys.exit(main())
diff --git a/.venv/bin/accelerate-estimate-memory b/.venv/bin/accelerate-estimate-memory
new file mode 100644
index 0000000000000000000000000000000000000000..ac2fa53325256f1c6c0a8a68194c26b386069412
--- /dev/null
+++ b/.venv/bin/accelerate-estimate-memory
@@ -0,0 +1,7 @@
+#!/raid/hvtham/nvloc/pandora-removal/.venv/bin/python
+import sys
+from accelerate.commands.estimate import main
+if __name__ == '__main__':
+    if sys.argv[0].endswith('.exe'):
+        sys.argv[0] = sys.argv[0][:-4]
+    sys.exit(main())
diff --git a/.venv/bin/accelerate-launch b/.venv/bin/accelerate-launch
new file mode 100644
index 0000000000000000000000000000000000000000..2d8722c864a7ba305414f1e048a9d16c69024713
--- /dev/null
+++ b/.venv/bin/accelerate-launch
@@ -0,0 +1,7 @@
+#!/raid/hvtham/nvloc/pandora-removal/.venv/bin/python
+import sys
+from accelerate.commands.launch import main
+if __name__ == '__main__':
+    if sys.argv[0].endswith('.exe'):
+        sys.argv[0] = sys.argv[0][:-4]
+    sys.exit(main())
diff --git a/.venv/bin/accelerate-merge-weights b/.venv/bin/accelerate-merge-weights
new file mode 100644
index 0000000000000000000000000000000000000000..ba21069654801fad5ff61650c6b326f05d7df643
--- /dev/null
+++ b/.venv/bin/accelerate-merge-weights
@@ -0,0 +1,7 @@
+#!/raid/hvtham/nvloc/pandora-removal/.venv/bin/python
+import sys
+from accelerate.commands.merge import main
+if __name__ == '__main__':
+    if sys.argv[0].endswith('.exe'):
+        sys.argv[0] = sys.argv[0][:-4]
+    sys.exit(main())
diff --git a/.venv/bin/activate b/.venv/bin/activate
new file mode 100644
index 0000000000000000000000000000000000000000..997c5f60178fd9e9ec43e4e040bfd9b1570fff4d
--- /dev/null
+++ b/.venv/bin/activate
@@ -0,0 +1,130 @@
+# Copyright (c) 2020-202x The virtualenv developers
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+# This file must be used with "source bin/activate" *from bash*
+# you cannot run it directly
+
+if ! [ -z "${SCRIPT_PATH+_}" ] ; then
+    _OLD_SCRIPT_PATH="$SCRIPT_PATH"
+fi
+
+# Get script path (only used if environment is relocatable).
+if [ -n "${BASH_VERSION:+x}" ] ; then
+    SCRIPT_PATH="${BASH_SOURCE[0]}"
+    if [ "$SCRIPT_PATH" = "$0" ]; then
+        # Only bash has a reasonably robust check for source'dness.
+        echo "You must source this script: \$ source $0" >&2
+        exit 33
+    fi
+elif [ -n "${ZSH_VERSION:+x}" ] ; then
+    SCRIPT_PATH="${(%):-%x}"
+elif [ -n "${KSH_VERSION:+x}" ] ; then
+    SCRIPT_PATH="${.sh.file}"
+fi
+
+deactivate () {
+    unset -f pydoc >/dev/null 2>&1 || true
+
+    # reset old environment variables
+    # ! [ -z ${VAR+_} ] returns true if VAR is declared at all
+    if ! [ -z "${_OLD_VIRTUAL_PATH:+_}" ] ; then
+        PATH="$_OLD_VIRTUAL_PATH"
+        export PATH
+        unset _OLD_VIRTUAL_PATH
+    fi
+    if ! [ -z "${_OLD_VIRTUAL_PYTHONHOME+_}" ] ; then
+        PYTHONHOME="$_OLD_VIRTUAL_PYTHONHOME"
+        export PYTHONHOME
+        unset _OLD_VIRTUAL_PYTHONHOME
+    fi
+
+    # The hash command must be called to get it to forget past
+    # commands. Without forgetting past commands the $PATH changes
+    # we made may not be respected
+    hash -r 2>/dev/null
+
+    if ! [ -z "${_OLD_VIRTUAL_PS1+_}" ] ; then
+        PS1="$_OLD_VIRTUAL_PS1"
+        export PS1
+        unset _OLD_VIRTUAL_PS1
+    fi
+
+    unset VIRTUAL_ENV
+    unset VIRTUAL_ENV_PROMPT
+    if [ ! "${1-}" = "nondestructive" ] ; then
+        # Self destruct!
+        unset -f deactivate
+    fi
+}
+
+# unset irrelevant variables
+deactivate nondestructive
+
+VIRTUAL_ENV='/raid/hvtham/nvloc/pandora-removal/.venv'
+if ([ "$OSTYPE" = "cygwin" ] || [ "$OSTYPE" = "msys" ]) && $(command -v cygpath &> /dev/null) ; then
+    VIRTUAL_ENV=$(cygpath -u "$VIRTUAL_ENV")
+fi
+export VIRTUAL_ENV
+
+# Unset the `SCRIPT_PATH` variable, now that the `VIRTUAL_ENV` variable
+# has been set. This is important for relocatable environments.
+if ! [ -z "${_OLD_SCRIPT_PATH+_}" ] ; then
+    SCRIPT_PATH="$_OLD_SCRIPT_PATH"
+    export SCRIPT_PATH
+    unset _OLD_SCRIPT_PATH
+else
+    unset SCRIPT_PATH
+fi
+
+_OLD_VIRTUAL_PATH="$PATH"
+PATH="$VIRTUAL_ENV/bin:$PATH"
+export PATH
+
+if [ "xpandora-removal" != x ] ; then
+    VIRTUAL_ENV_PROMPT="pandora-removal"
+else
+    VIRTUAL_ENV_PROMPT=$(basename "$VIRTUAL_ENV")
+fi
+export VIRTUAL_ENV_PROMPT
+
+# unset PYTHONHOME if set
+if ! [ -z "${PYTHONHOME+_}" ] ; then
+    _OLD_VIRTUAL_PYTHONHOME="$PYTHONHOME"
+    unset PYTHONHOME
+fi
+
+if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT-}" ] ; then
+    _OLD_VIRTUAL_PS1="${PS1-}"
+    PS1="(${VIRTUAL_ENV_PROMPT}) ${PS1-}"
+    export PS1
+fi
+
+# Make sure to unalias pydoc if it's already there
+alias pydoc 2>/dev/null >/dev/null && unalias pydoc || true
+
+pydoc () {
+    python -m pydoc "$@"
+}
+
+# The hash command must be called to get it to forget past
+# commands. Without forgetting past commands the $PATH changes
+# we made may not be respected
+hash -r 2>/dev/null
diff --git a/.venv/bin/activate.bat b/.venv/bin/activate.bat
new file mode 100644
index 0000000000000000000000000000000000000000..fa0b54d7fc8a3892ad15e24f00d595c04b23caec
--- /dev/null
+++ b/.venv/bin/activate.bat
@@ -0,0 +1,71 @@
+@REM Copyright (c) 2020-202x The virtualenv developers
+@REM
+@REM Permission is hereby granted, free of charge, to any person obtaining
+@REM a copy of this software and associated documentation files (the
+@REM "Software"), to deal in the Software without restriction, including
+@REM without limitation the rights to use, copy, modify, merge, publish,
+@REM distribute, sublicense, and/or sell copies of the Software, and to
+@REM permit persons to whom the Software is furnished to do so, subject to
+@REM the following conditions:
+@REM
+@REM The above copyright notice and this permission notice shall be
+@REM included in all copies or substantial portions of the Software.
+@REM
+@REM THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+@REM EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+@REM MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+@REM NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+@REM LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+@REM OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+@REM WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+@REM This file is UTF-8 encoded, so we need to update the current code page while executing it
+@for /f "tokens=2 delims=:." %%a in ('"%SystemRoot%\System32\chcp.com"') do @set _OLD_CODEPAGE=%%a
+@if defined _OLD_CODEPAGE (
+    @"%SystemRoot%\System32\chcp.com" 65001 > nul
+)
+
+@for %%i in ("/raid/hvtham/nvloc/pandora-removal/.venv") do @set "VIRTUAL_ENV=%%~fi"
+
+@set "VIRTUAL_ENV_PROMPT=pandora-removal"
+@if NOT DEFINED VIRTUAL_ENV_PROMPT (
+    @for %%d in ("%VIRTUAL_ENV%") do @set "VIRTUAL_ENV_PROMPT=%%~nxd"
+)
+
+@if defined _OLD_VIRTUAL_PROMPT (
+    @set "PROMPT=%_OLD_VIRTUAL_PROMPT%"
+) else (
+    @if not defined PROMPT (
+        @set "PROMPT=$P$G"
+    )
+    @if not defined VIRTUAL_ENV_DISABLE_PROMPT (
+        @set "_OLD_VIRTUAL_PROMPT=%PROMPT%"
+    )
+)
+@if not defined VIRTUAL_ENV_DISABLE_PROMPT (
+    @set "PROMPT=(%VIRTUAL_ENV_PROMPT%) %PROMPT%"
+)
+
+@REM Don't use () to avoid problems with them in %PATH%
+@if defined _OLD_VIRTUAL_PYTHONHOME @goto ENDIFVHOME
+    @set "_OLD_VIRTUAL_PYTHONHOME=%PYTHONHOME%"
+:ENDIFVHOME
+
+@set PYTHONHOME=
+
+@REM if defined _OLD_VIRTUAL_PATH (
+@if not defined _OLD_VIRTUAL_PATH @goto ENDIFVPATH1
+    @set "PATH=%_OLD_VIRTUAL_PATH%"
+:ENDIFVPATH1
+@REM ) else (
+@if defined _OLD_VIRTUAL_PATH @goto ENDIFVPATH2
+    @set "_OLD_VIRTUAL_PATH=%PATH%"
+:ENDIFVPATH2
+
+@set "PATH=%VIRTUAL_ENV%\bin;%PATH%"
+
+:END
+@if defined _OLD_CODEPAGE (
+    @"%SystemRoot%\System32\chcp.com" %_OLD_CODEPAGE% > nul
+    @set _OLD_CODEPAGE=
+)
diff --git a/.venv/bin/activate.csh b/.venv/bin/activate.csh
new file mode 100644
index 0000000000000000000000000000000000000000..d0fdf94690be0f76ef4a89490eba80df05c99298
--- /dev/null
+++ b/.venv/bin/activate.csh
@@ -0,0 +1,76 @@
+# Copyright (c) 2020-202x The virtualenv developers
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+# This file must be used with "source bin/activate.csh" *from csh*.
+# You cannot run it directly.
+# Created by Davide Di Blasi <davidedb@gmail.com>.
+
+set newline='\
+'
+
+alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH:q" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT:q" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; unsetenv VIRTUAL_ENV_PROMPT; test "\!:*" != "nondestructive" && unalias deactivate && unalias pydoc'
+
+# Unset irrelevant variables.
+deactivate nondestructive
+
+setenv VIRTUAL_ENV '/raid/hvtham/nvloc/pandora-removal/.venv'
+
+set _OLD_VIRTUAL_PATH="$PATH:q"
+setenv PATH "$VIRTUAL_ENV:q/bin:$PATH:q"
+
+
+
+if ('pandora-removal' != "") then
+    setenv VIRTUAL_ENV_PROMPT 'pandora-removal'
+else
+    setenv VIRTUAL_ENV_PROMPT "$VIRTUAL_ENV:t:q"
+endif
+
+if ( $?VIRTUAL_ENV_DISABLE_PROMPT ) then
+    if ( $VIRTUAL_ENV_DISABLE_PROMPT == "" ) then
+        set do_prompt = "1"
+    else
+        set do_prompt = "0"
+    endif
+else
+    set do_prompt = "1"
+endif
+
+if ( $do_prompt == "1" ) then
+    # Could be in a non-interactive environment,
+    # in which case, $prompt is undefined and we wouldn't
+    # care about the prompt anyway.
+    if ( $?prompt ) then
+        set _OLD_VIRTUAL_PROMPT="$prompt:q"
+        if ( "$prompt:q" =~ *"$newline:q"* ) then
+            :
+        else
+            set prompt = '('"$VIRTUAL_ENV_PROMPT:q"') '"$prompt:q"
+        endif
+    endif
+endif
+
+unset env_name
+unset do_prompt
+
+alias pydoc python -m pydoc
+
+rehash
diff --git a/.venv/bin/activate.fish b/.venv/bin/activate.fish
new file mode 100644
index 0000000000000000000000000000000000000000..0d60db85b6306b160485d00ee3f1236ff202b73b
--- /dev/null
+++ b/.venv/bin/activate.fish
@@ -0,0 +1,124 @@
+# Copyright (c) 2020-202x The virtualenv developers
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+# This file must be used using `source bin/activate.fish` *within a running fish ( http://fishshell.com ) session*.
+# Do not run it directly.
+
+function _bashify_path -d "Converts a fish path to something bash can recognize"
+    set fishy_path $argv
+    set bashy_path $fishy_path[1]
+    for path_part in $fishy_path[2..-1]
+        set bashy_path "$bashy_path:$path_part"
+    end
+    echo $bashy_path
+end
+
+function _fishify_path -d "Converts a bash path to something fish can recognize"
+    echo $argv | tr ':' '\n'
+end
+
+function deactivate -d 'Exit virtualenv mode and return to the normal environment.'
+    # reset old environment variables
+    if test -n "$_OLD_VIRTUAL_PATH"
+        # https://github.com/fish-shell/fish-shell/issues/436 altered PATH handling
+        if test (echo $FISH_VERSION | head -c 1) -lt 3
+            set -gx PATH (_fishify_path "$_OLD_VIRTUAL_PATH")
+        else
+            set -gx PATH $_OLD_VIRTUAL_PATH
+        end
+        set -e _OLD_VIRTUAL_PATH
+    end
+
+    if test -n "$_OLD_VIRTUAL_PYTHONHOME"
+        set -gx PYTHONHOME "$_OLD_VIRTUAL_PYTHONHOME"
+        set -e _OLD_VIRTUAL_PYTHONHOME
+    end
+
+    if test -n "$_OLD_FISH_PROMPT_OVERRIDE"
+        and functions -q _old_fish_prompt
+        # Set an empty local `$fish_function_path` to allow the removal of `fish_prompt` using `functions -e`.
+        set -l fish_function_path
+
+        # Erase virtualenv's `fish_prompt` and restore the original.
+        functions -e fish_prompt
+        functions -c _old_fish_prompt fish_prompt
+        functions -e _old_fish_prompt
+        set -e _OLD_FISH_PROMPT_OVERRIDE
+    end
+
+    set -e VIRTUAL_ENV
+    set -e VIRTUAL_ENV_PROMPT
+
+    if test "$argv[1]" != 'nondestructive'
+        # Self-destruct!
+        functions -e pydoc
+        functions -e deactivate
+        functions -e _bashify_path
+        functions -e _fishify_path
+    end
+end
+
+# Unset irrelevant variables.
+deactivate nondestructive
+
+set -gx VIRTUAL_ENV '/raid/hvtham/nvloc/pandora-removal/.venv'
+
+# https://github.com/fish-shell/fish-shell/issues/436 altered PATH handling
+if test (echo $FISH_VERSION | head -c 1) -lt 3
+    set -gx _OLD_VIRTUAL_PATH (_bashify_path $PATH)
+else
+    set -gx _OLD_VIRTUAL_PATH $PATH
+end
+set -gx PATH "$VIRTUAL_ENV"'/bin' $PATH
+
+# Prompt override provided?
+# If not, just use the environment name.
+if test -n 'pandora-removal'
+    set -gx VIRTUAL_ENV_PROMPT 'pandora-removal'
+else
+    set -gx VIRTUAL_ENV_PROMPT (basename "$VIRTUAL_ENV")
+end
+
+# Unset `$PYTHONHOME` if set.
+if set -q PYTHONHOME
+    set -gx _OLD_VIRTUAL_PYTHONHOME $PYTHONHOME
+    set -e PYTHONHOME
+end
+
+function pydoc
+    python -m pydoc $argv
+end
+
+if test -z "$VIRTUAL_ENV_DISABLE_PROMPT"
+    # Copy the current `fish_prompt` function as `_old_fish_prompt`.
+    functions -c fish_prompt _old_fish_prompt
+
+    function fish_prompt
+        # Run the user's prompt first; it might depend on (pipe)status.
+        set -l prompt (_old_fish_prompt)
+
+        printf '(%s) ' $VIRTUAL_ENV_PROMPT
+
+        string join -- \n $prompt # handle multi-line prompts
+    end
+
+    set -gx _OLD_FISH_PROMPT_OVERRIDE "$VIRTUAL_ENV"
+end
diff --git a/.venv/bin/activate.nu b/.venv/bin/activate.nu
new file mode 100644
index 0000000000000000000000000000000000000000..75d4705b87a13cd4e1c1ba1a99ee821514aff530
--- /dev/null
+++ b/.venv/bin/activate.nu
@@ -0,0 +1,117 @@
+# Copyright (c) 2020-202x The virtualenv developers
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+# virtualenv activation module
+# Activate with `overlay use activate.nu`
+# Deactivate with `deactivate`, as usual
+#
+# To customize the overlay name, you can call `overlay use activate.nu as foo`,
+# but then simply `deactivate` won't work because it is just an alias to hide
+# the "activate" overlay. You'd need to call `overlay hide foo` manually.
+
+export-env {
+    def is-string [x] {
+        ($x | describe) == 'string'
+    }
+
+    def has-env [...names] {
+        $names | each {|n|
+            $n in $env
+        } | all {|i| $i == true}
+    }
+
+    # Emulates a `test -z`, but better as it handles e.g 'false'
+    def is-env-true [name: string] {
+        if (has-env $name) {
+            # Try to parse 'true', '0', '1', and fail if not convertible
+            let parsed = (do -i { $env | get $name | into bool })
+            if ($parsed | describe) == 'bool' {
+                $parsed
+            } else {
+                not ($env | get -i $name | is-empty)
+            }
+        } else {
+            false
+        }
+    }
+
+    let virtual_env = '/raid/hvtham/nvloc/pandora-removal/.venv'
+    let bin = 'bin'
+
+    let is_windows = ($nu.os-info.family) == 'windows'
+    let path_name = (if (has-env 'Path') {
+            'Path'
+        } else {
+            'PATH'
+        }
+    )
+
+    let venv_path = ([$virtual_env $bin] | path join)
+    let new_path = ($env | get $path_name | prepend $venv_path)
+
+    # If there is no default prompt, then use the env name instead
+    let virtual_env_prompt = (if ('pandora-removal' | is-empty) {
+        ($virtual_env | path basename)
+    } else {
+        'pandora-removal'
+    })
+
+    let new_env = {
+        $path_name         : $new_path
+        VIRTUAL_ENV        : $virtual_env
+        VIRTUAL_ENV_PROMPT : $virtual_env_prompt
+    }
+
+    let new_env = (if (is-env-true 'VIRTUAL_ENV_DISABLE_PROMPT') {
+        $new_env
+    } else {
+        # Creating the new prompt for the session
+        let virtual_prefix = $'(char lparen)($virtual_env_prompt)(char rparen) '
+
+        # Back up the old prompt builder
+        let old_prompt_command = (if (has-env 'PROMPT_COMMAND') {
+            $env.PROMPT_COMMAND
+        } else {
+            ''
+        })
+
+        let new_prompt = (if (has-env 'PROMPT_COMMAND') {
+            if 'closure' in ($old_prompt_command | describe) {
+                {|| $'($virtual_prefix)(do $old_prompt_command)' }
+            } else {
+                {|| $'($virtual_prefix)($old_prompt_command)' }
+            }
+        } else {
+            {|| $'($virtual_prefix)' }
+        })
+
+        $new_env | merge {
+            PROMPT_COMMAND : $new_prompt
+            VIRTUAL_PREFIX : $virtual_prefix
+        }
+    })
+
+    # Environment variables that will be loaded as the virtual env
+    load-env $new_env
+}
+
+export alias pydoc = python -m pydoc
+export alias deactivate = overlay hide activate
diff --git a/.venv/bin/activate.ps1 b/.venv/bin/activate.ps1
new file mode 100644
index 0000000000000000000000000000000000000000..29415553f24d0b6c051cd1d7efdc18d703e1af46
--- /dev/null
+++ b/.venv/bin/activate.ps1
@@ -0,0 +1,82 @@
+# Copyright (c) 2020-202x The virtualenv developers
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+$script:THIS_PATH = $myinvocation.mycommand.path
+$script:BASE_DIR = Split-Path (Resolve-Path "$THIS_PATH/..") -Parent
+
+function global:deactivate([switch] $NonDestructive) {
+    if (Test-Path variable:_OLD_VIRTUAL_PATH) {
+        $env:PATH = $variable:_OLD_VIRTUAL_PATH
+        Remove-Variable "_OLD_VIRTUAL_PATH" -Scope global
+    }
+
+    if (Test-Path function:_old_virtual_prompt) {
+        $function:prompt = $function:_old_virtual_prompt
+        Remove-Item function:\_old_virtual_prompt
+    }
+
+    if ($env:VIRTUAL_ENV) {
+        Remove-Item env:VIRTUAL_ENV -ErrorAction SilentlyContinue
+    }
+
+    if ($env:VIRTUAL_ENV_PROMPT) {
+        Remove-Item env:VIRTUAL_ENV_PROMPT -ErrorAction SilentlyContinue
+    }
+
+    if (!$NonDestructive) {
+        # Self destruct!
+        Remove-Item function:deactivate
+        Remove-Item function:pydoc
+    }
+}
+
+function global:pydoc {
+    python -m pydoc $args
+}
+
+# unset irrelevant variables
+deactivate -nondestructive
+
+$VIRTUAL_ENV = $BASE_DIR
+$env:VIRTUAL_ENV = $VIRTUAL_ENV
+
+if ("pandora-removal" -ne "") {
+    $env:VIRTUAL_ENV_PROMPT = "pandora-removal"
+}
+else {
+    $env:VIRTUAL_ENV_PROMPT = $( Split-Path $env:VIRTUAL_ENV -Leaf )
+}
+
+New-Variable -Scope global -Name _OLD_VIRTUAL_PATH -Value $env:PATH
+
+$env:PATH = "$env:VIRTUAL_ENV/bin:" + $env:PATH
+if (!$env:VIRTUAL_ENV_DISABLE_PROMPT) {
+    function global:_old_virtual_prompt {
+        ""
+    }
+    $function:_old_virtual_prompt = $function:prompt
+
+    function global:prompt {
+        # Add the custom prefix to the existing prompt
+        $previous_prompt_value = & $function:_old_virtual_prompt
+        ("(" + $env:VIRTUAL_ENV_PROMPT + ") " + $previous_prompt_value)
+    }
+}
diff --git a/.venv/bin/activate_this.py b/.venv/bin/activate_this.py
new file mode 100644
index 0000000000000000000000000000000000000000..a6810d12efe6cd73e74d9e79ff824c8605e622ca
--- /dev/null
+++ b/.venv/bin/activate_this.py
@@ -0,0 +1,59 @@
+# Copyright (c) 2020-202x The virtualenv developers
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+"""
+Activate virtualenv for current interpreter:
+
+import runpy
+runpy.run_path(this_file)
+
+This can be used when you must use an existing Python interpreter, not the virtualenv bin/python.
+""" # noqa: D415 + +from __future__ import annotations + +import os +import site +import sys + +try: + abs_file = os.path.abspath(__file__) +except NameError as exc: + msg = "You must use import runpy; runpy.run_path(this_file)" + raise AssertionError(msg) from exc + +bin_dir = os.path.dirname(abs_file) +base = bin_dir[: -len("bin") - 1] # strip away the bin part from the __file__, plus the path separator + +# prepend bin to PATH (this file is inside the bin directory) +os.environ["PATH"] = os.pathsep.join([bin_dir, *os.environ.get("PATH", "").split(os.pathsep)]) +os.environ["VIRTUAL_ENV"] = base # virtual env is right above bin directory +os.environ["VIRTUAL_ENV_PROMPT"] = "pandora-removal" or os.path.basename(base) # noqa: SIM222 + +# add the virtual environments libraries to the host python import mechanism +prev_length = len(sys.path) +for lib in "../lib/python3.12/site-packages".split(os.pathsep): + path = os.path.realpath(os.path.join(bin_dir, lib)) + site.addsitedir(path) +sys.path[:] = sys.path[prev_length:] + sys.path[0:prev_length] + +sys.real_prefix = sys.prefix +sys.prefix = base diff --git a/.venv/bin/deactivate.bat b/.venv/bin/deactivate.bat new file mode 100644 index 0000000000000000000000000000000000000000..07041bc45129994cbb4b338f6fb61aaf502571fb --- /dev/null +++ b/.venv/bin/deactivate.bat @@ -0,0 +1,39 @@ +@REM Copyright (c) 2020-202x The virtualenv developers +@REM +@REM Permission is hereby granted, free of charge, to any person obtaining +@REM a copy of this software and associated documentation files (the +@REM "Software"), to deal in the Software without restriction, including +@REM without limitation the rights to use, copy, modify, merge, publish, +@REM distribute, sublicense, and/or sell copies of the Software, and to +@REM permit persons to whom the Software is furnished to do so, subject to +@REM the following conditions: +@REM +@REM The above copyright notice and this permission notice shall be +@REM included in all copies or substantial portions of the Software. +@REM +@REM THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +@REM EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +@REM MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +@REM NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +@REM LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +@REM OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +@REM WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+
+@set VIRTUAL_ENV=
+@set VIRTUAL_ENV_PROMPT=
+
+@REM Don't use () to avoid problems with them in %PATH%
+@if not defined _OLD_VIRTUAL_PROMPT @goto ENDIFVPROMPT
+    @set "PROMPT=%_OLD_VIRTUAL_PROMPT%"
+    @set _OLD_VIRTUAL_PROMPT=
+:ENDIFVPROMPT
+
+@if not defined _OLD_VIRTUAL_PYTHONHOME @goto ENDIFVHOME
+    @set "PYTHONHOME=%_OLD_VIRTUAL_PYTHONHOME%"
+    @set _OLD_VIRTUAL_PYTHONHOME=
+:ENDIFVHOME
+
+@if not defined _OLD_VIRTUAL_PATH @goto ENDIFVPATH
+    @set "PATH=%_OLD_VIRTUAL_PATH%"
+    @set _OLD_VIRTUAL_PATH=
+:ENDIFVPATH
\ No newline at end of file
diff --git a/.venv/bin/diffusers-cli b/.venv/bin/diffusers-cli
new file mode 100644
index 0000000000000000000000000000000000000000..f2b5991b375c18b07b3ea911560075ff34f45174
--- /dev/null
+++ b/.venv/bin/diffusers-cli
@@ -0,0 +1,7 @@
+#!/raid/hvtham/nvloc/pandora-removal/.venv/bin/python
+import sys
+from diffusers.commands.diffusers_cli import main
+if __name__ == '__main__':
+    if sys.argv[0].endswith('.exe'):
+        sys.argv[0] = sys.argv[0][:-4]
+    sys.exit(main())
diff --git a/.venv/bin/f2py b/.venv/bin/f2py
new file mode 100644
index 0000000000000000000000000000000000000000..523fe938bc4c98d0aff7560a5af4b70b680da52d
--- /dev/null
+++ b/.venv/bin/f2py
@@ -0,0 +1,7 @@
+#!/raid/hvtham/nvloc/pandora-removal/.venv/bin/python
+import sys
+from numpy.f2py.f2py2e import main
+if __name__ == '__main__':
+    if sys.argv[0].endswith('.exe'):
+        sys.argv[0] = sys.argv[0][:-4]
+    sys.exit(main())
diff --git a/.venv/bin/fastapi b/.venv/bin/fastapi
new file mode 100644
index 0000000000000000000000000000000000000000..7d625f2f67e719e28d30f856c85b40048745beee
--- /dev/null
+++ b/.venv/bin/fastapi
@@ -0,0 +1,7 @@
+#!/raid/hvtham/nvloc/pandora-removal/.venv/bin/python
+import sys
+from fastapi.cli import main
+if __name__ == '__main__':
+    if sys.argv[0].endswith('.exe'):
+        sys.argv[0] = sys.argv[0][:-4]
+    sys.exit(main())
diff --git a/.venv/bin/gradio b/.venv/bin/gradio
new file mode 100644
index 0000000000000000000000000000000000000000..789d13f2028dedcdbc016c0ac305c577499e7bbb
--- /dev/null
+++ b/.venv/bin/gradio
@@ -0,0 +1,7 @@
+#!/raid/hvtham/nvloc/pandora-removal/.venv/bin/python
+import sys
+from gradio.cli import cli
+if __name__ == '__main__':
+    if sys.argv[0].endswith('.exe'):
+        sys.argv[0] = sys.argv[0][:-4]
+    sys.exit(cli())
diff --git a/.venv/bin/hf b/.venv/bin/hf
new file mode 100644
index 0000000000000000000000000000000000000000..d06214d3c44d5663c08e922000370ebcda99a486
--- /dev/null
+++ b/.venv/bin/hf
@@ -0,0 +1,7 @@
+#!/raid/hvtham/nvloc/pandora-removal/.venv/bin/python
+import sys
+from huggingface_hub.cli.hf import main
+if __name__ == '__main__':
+    if sys.argv[0].endswith('.exe'):
+        sys.argv[0] = sys.argv[0][:-4]
+    sys.exit(main())
diff --git a/.venv/bin/httpx b/.venv/bin/httpx
new file mode 100644
index 0000000000000000000000000000000000000000..054be0a4496524459326fe56b23ba21c2eaf1b0d
--- /dev/null
+++ b/.venv/bin/httpx
@@ -0,0 +1,7 @@
+#!/raid/hvtham/nvloc/pandora-removal/.venv/bin/python
+import sys
+from httpx import main
+if __name__ == '__main__':
+    if sys.argv[0].endswith('.exe'):
+        sys.argv[0] = sys.argv[0][:-4]
+    sys.exit(main())
diff --git a/.venv/bin/huggingface-cli b/.venv/bin/huggingface-cli
new file mode 100644
index 0000000000000000000000000000000000000000..f694a094fac70c483ba8e08040b26ea836e26503
--- /dev/null
+++ b/.venv/bin/huggingface-cli
@@ -0,0 +1,7 @@
+#!/raid/hvtham/nvloc/pandora-removal/.venv/bin/python
+import sys
+from huggingface_hub.commands.huggingface_cli import main
+if __name__ == '__main__':
+    if sys.argv[0].endswith('.exe'):
+        sys.argv[0] = sys.argv[0][:-4]
+    sys.exit(main())
diff --git a/.venv/bin/isympy b/.venv/bin/isympy
new file mode 100644
index 0000000000000000000000000000000000000000..abcf56204bea105a5f2cf58e59b0f33ec40d6568
--- /dev/null
+++ b/.venv/bin/isympy
@@ -0,0 +1,7 @@
+#!/raid/hvtham/nvloc/pandora-removal/.venv/bin/python
+import sys
+from isympy import main
+if __name__ == '__main__':
+    if sys.argv[0].endswith('.exe'):
+        sys.argv[0] = sys.argv[0][:-4]
+    sys.exit(main())
diff --git a/.venv/bin/markdown-it b/.venv/bin/markdown-it
new file mode 100644
index 0000000000000000000000000000000000000000..656308a7cb06b16fbb31e46b8ff7c778d31655ca
--- /dev/null
+++ b/.venv/bin/markdown-it
@@ -0,0 +1,7 @@
+#!/raid/hvtham/nvloc/pandora-removal/.venv/bin/python
+import sys
+from markdown_it.cli.parse import main
+if __name__ == '__main__':
+    if sys.argv[0].endswith('.exe'):
+        sys.argv[0] = sys.argv[0][:-4]
+    sys.exit(main())
diff --git a/.venv/bin/normalizer b/.venv/bin/normalizer
new file mode 100644
index 0000000000000000000000000000000000000000..ba384c96ac2bf03b624ce19d87d034560fde00f4
--- /dev/null
+++ b/.venv/bin/normalizer
@@ -0,0 +1,7 @@
+#!/raid/hvtham/nvloc/pandora-removal/.venv/bin/python
+import sys
+from charset_normalizer.cli import cli_detect
+if __name__ == '__main__':
+    if sys.argv[0].endswith('.exe'):
+        sys.argv[0] = sys.argv[0][:-4]
+    sys.exit(cli_detect())
diff --git a/.venv/bin/numpy-config b/.venv/bin/numpy-config
new file mode 100644
index 0000000000000000000000000000000000000000..fc47c860333b0213346629030da9008db85f79bd
--- /dev/null
+++ b/.venv/bin/numpy-config
@@ -0,0 +1,7 @@
+#!/raid/hvtham/nvloc/pandora-removal/.venv/bin/python
+import sys
+from numpy._configtool import main
+if __name__ == '__main__':
+    if sys.argv[0].endswith('.exe'):
+        sys.argv[0] = sys.argv[0][:-4]
+    sys.exit(main())
diff --git a/.venv/bin/pip b/.venv/bin/pip
new file mode 100644
index 0000000000000000000000000000000000000000..7cf8dda7a4b9e41c4b32111890d7bee074e2035e
--- /dev/null
+++ b/.venv/bin/pip
@@ -0,0 +1,10 @@
+#!/raid/hvtham/nvloc/pandora-removal/.venv/bin/python
+# -*- coding: utf-8 -*-
+import sys
+from pip._internal.cli.main import main
+if __name__ == "__main__":
+    if sys.argv[0].endswith("-script.pyw"):
+        sys.argv[0] = sys.argv[0][:-11]
+    elif sys.argv[0].endswith(".exe"):
+        sys.argv[0] = sys.argv[0][:-4]
+    sys.exit(main())
diff --git a/.venv/bin/pip3 b/.venv/bin/pip3
new file mode 100644
index 0000000000000000000000000000000000000000..7cf8dda7a4b9e41c4b32111890d7bee074e2035e
--- /dev/null
+++ b/.venv/bin/pip3
@@ -0,0 +1,10 @@
+#!/raid/hvtham/nvloc/pandora-removal/.venv/bin/python
+# -*- coding: utf-8 -*-
+import sys
+from pip._internal.cli.main import main
+if __name__ == "__main__":
+    if sys.argv[0].endswith("-script.pyw"):
+        sys.argv[0] = sys.argv[0][:-11]
+    elif sys.argv[0].endswith(".exe"):
+        sys.argv[0] = sys.argv[0][:-4]
+    sys.exit(main())
diff --git a/.venv/bin/pip3.12 b/.venv/bin/pip3.12
new file mode 100644
index 0000000000000000000000000000000000000000..7cf8dda7a4b9e41c4b32111890d7bee074e2035e
--- /dev/null
+++ b/.venv/bin/pip3.12
@@ -0,0 +1,10 @@
+#!/raid/hvtham/nvloc/pandora-removal/.venv/bin/python
+# -*- coding: utf-8 -*-
+import sys
+from pip._internal.cli.main import main
+if __name__ == "__main__":
+    if sys.argv[0].endswith("-script.pyw"):
+        sys.argv[0] = sys.argv[0][:-11]
+    elif sys.argv[0].endswith(".exe"):
+        sys.argv[0] = sys.argv[0][:-4]
+    sys.exit(main())
diff --git a/.venv/bin/proton b/.venv/bin/proton
new file mode 100644
index 0000000000000000000000000000000000000000..0b52c055535572cfbbf2dff4cf4eb361439566fb
--- /dev/null
+++ b/.venv/bin/proton
@@ -0,0 +1,7 @@
+#!/raid/hvtham/nvloc/pandora-removal/.venv/bin/python
+import sys
+from triton.profiler.proton import main
+if __name__ == '__main__':
+    if sys.argv[0].endswith('.exe'):
+        sys.argv[0] = sys.argv[0][:-4]
+    sys.exit(main())
diff --git a/.venv/bin/proton-viewer b/.venv/bin/proton-viewer
new file mode 100644
index 0000000000000000000000000000000000000000..bb5ba83f6ecc52082f0e965772896f326251713f
--- /dev/null
+++ b/.venv/bin/proton-viewer
@@ -0,0 +1,7 @@
+#!/raid/hvtham/nvloc/pandora-removal/.venv/bin/python
+import sys
+from triton.profiler.viewer import main
+if __name__ == '__main__':
+    if sys.argv[0].endswith('.exe'):
+        sys.argv[0] = sys.argv[0][:-4]
+    sys.exit(main())
diff --git a/.venv/bin/pydoc.bat b/.venv/bin/pydoc.bat
new file mode 100644
index 0000000000000000000000000000000000000000..daa20590b181e9f5a1f4f642f6c4eaf471cff00f
--- /dev/null
+++ b/.venv/bin/pydoc.bat
@@ -0,0 +1,22 @@
+@REM Copyright (c) 2020-202x The virtualenv developers
+@REM
+@REM Permission is hereby granted, free of charge, to any person obtaining
+@REM a copy of this software and associated documentation files (the
+@REM "Software"), to deal in the Software without restriction, including
+@REM without limitation the rights to use, copy, modify, merge, publish,
+@REM distribute, sublicense, and/or sell copies of the Software, and to
+@REM permit persons to whom the Software is furnished to do so, subject to
+@REM the following conditions:
+@REM
+@REM The above copyright notice and this permission notice shall be
+@REM included in all copies or substantial portions of the Software.
+@REM
+@REM THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+@REM EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+@REM MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+@REM NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+@REM LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+@REM OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+@REM WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+python.exe -m pydoc %*
\ No newline at end of file
diff --git a/.venv/bin/pygmentize b/.venv/bin/pygmentize
new file mode 100644
index 0000000000000000000000000000000000000000..1e9962a117eb4c2037da56a7c6cd0b5b165adeae
--- /dev/null
+++ b/.venv/bin/pygmentize
@@ -0,0 +1,7 @@
+#!/raid/hvtham/nvloc/pandora-removal/.venv/bin/python
+import sys
+from pygments.cmdline import main
+if __name__ == '__main__':
+    if sys.argv[0].endswith('.exe'):
+        sys.argv[0] = sys.argv[0][:-4]
+    sys.exit(main())
diff --git a/.venv/bin/python b/.venv/bin/python
new file mode 100644
index 0000000000000000000000000000000000000000..6011e025424b1f07a26131ec56f981c9ae324e21
--- /dev/null
+++ b/.venv/bin/python
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bbb40286e20e24a09d4732f94005fd05abf4c704d6175f418d745144bcbbae01
+size 33390624
diff --git a/.venv/bin/python3 b/.venv/bin/python3
new file mode 100644
index 0000000000000000000000000000000000000000..6011e025424b1f07a26131ec56f981c9ae324e21
--- /dev/null
+++ b/.venv/bin/python3
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bbb40286e20e24a09d4732f94005fd05abf4c704d6175f418d745144bcbbae01
+size 33390624
diff --git a/.venv/bin/python3.12 b/.venv/bin/python3.12
new file mode 100644
index 0000000000000000000000000000000000000000..6011e025424b1f07a26131ec56f981c9ae324e21
--- /dev/null
+++ b/.venv/bin/python3.12
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bbb40286e20e24a09d4732f94005fd05abf4c704d6175f418d745144bcbbae01
+size 33390624
diff --git a/.venv/bin/ruff b/.venv/bin/ruff
new file mode 100644
index 0000000000000000000000000000000000000000..cdec57565491e5d461e970fd79bfb1b70295ee7c
--- /dev/null
+++ b/.venv/bin/ruff
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8c0ad04e5c8df015be63cc2a9bf9588e64bf4c7613fc7fb5362fc962ae547080
+size 33943040
diff --git a/.venv/bin/tiny-agents b/.venv/bin/tiny-agents
new file mode 100644
index 0000000000000000000000000000000000000000..4c7a34a03bf328b68fbdb9173d57a63063487ad4
--- /dev/null
+++ b/.venv/bin/tiny-agents
@@ -0,0 +1,7 @@
+#!/raid/hvtham/nvloc/pandora-removal/.venv/bin/python
+import sys
+from huggingface_hub.inference._mcp.cli import app
+if __name__ == '__main__':
+    if sys.argv[0].endswith('.exe'):
+        sys.argv[0] = sys.argv[0][:-4]
+    sys.exit(app())
diff --git a/.venv/bin/torchfrtrace b/.venv/bin/torchfrtrace
new file mode 100644
index 0000000000000000000000000000000000000000..7af37de78f8e372f0d4b964ec279189a0c31873f
--- /dev/null
+++ b/.venv/bin/torchfrtrace
@@ -0,0 +1,7 @@
+#!/raid/hvtham/nvloc/pandora-removal/.venv/bin/python
+import sys
+from tools.flight_recorder.fr_trace import main
+if __name__ == '__main__':
+    if sys.argv[0].endswith('.exe'):
+        sys.argv[0] = sys.argv[0][:-4]
+    sys.exit(main())
diff --git a/.venv/bin/torchrun b/.venv/bin/torchrun
new file mode 100644
index 0000000000000000000000000000000000000000..80f2f700de438082436200ee2ef43fb9ff913a48
--- /dev/null
+++ b/.venv/bin/torchrun
@@ -0,0 +1,7 @@
+#!/raid/hvtham/nvloc/pandora-removal/.venv/bin/python
+import sys
+from torch.distributed.run import main
+if __name__ == '__main__':
+    if sys.argv[0].endswith('.exe'):
+        sys.argv[0] = sys.argv[0][:-4]
+    sys.exit(main())
diff --git a/.venv/bin/tqdm b/.venv/bin/tqdm
new file mode 100644
index 0000000000000000000000000000000000000000..992220a93f53b5bbb46242d93ef46a016045c637
--- /dev/null
+++ b/.venv/bin/tqdm
@@ -0,0 +1,7 @@
+#!/raid/hvtham/nvloc/pandora-removal/.venv/bin/python
+import sys
+from tqdm.cli import main
+if __name__ == '__main__':
+    if sys.argv[0].endswith('.exe'):
+        sys.argv[0] = sys.argv[0][:-4]
+    sys.exit(main())
diff --git a/.venv/bin/transformers b/.venv/bin/transformers
new file mode 100644
index 0000000000000000000000000000000000000000..ede4599a28009a9525ea56a4780df4078d9d4425
--- /dev/null
+++ b/.venv/bin/transformers
@@ -0,0 +1,7 @@
+#!/raid/hvtham/nvloc/pandora-removal/.venv/bin/python
+import sys
+from transformers.commands.transformers_cli import main
+if __name__ == '__main__':
+    if sys.argv[0].endswith('.exe'):
+        sys.argv[0] = sys.argv[0][:-4]
+    sys.exit(main())
diff --git a/.venv/bin/transformers-cli b/.venv/bin/transformers-cli
new file mode 100644
index 0000000000000000000000000000000000000000..af087523c22b9dd436d57d3005ceb0312a1b92d3
--- /dev/null
+++ b/.venv/bin/transformers-cli
@@ -0,0 +1,7 @@
+#!/raid/hvtham/nvloc/pandora-removal/.venv/bin/python
+import sys
+from transformers.commands.transformers_cli import main_cli
+if __name__ == '__main__':
+    if sys.argv[0].endswith('.exe'):
+        sys.argv[0] = sys.argv[0][:-4]
+    sys.exit(main_cli())
diff --git a/.venv/bin/typer b/.venv/bin/typer
new file mode 100644
index 0000000000000000000000000000000000000000..45c687d1fb316f9bebdea3e8ae4f953cb3d116c0
--- /dev/null
+++ b/.venv/bin/typer
@@ -0,0 +1,7 @@
+#!/raid/hvtham/nvloc/pandora-removal/.venv/bin/python
+import sys
+from typer.cli import main
+if __name__ == '__main__':
+    if sys.argv[0].endswith('.exe'):
+        sys.argv[0] = sys.argv[0][:-4]
+    sys.exit(main())
diff --git a/.venv/bin/upload_theme b/.venv/bin/upload_theme
new file mode 100644
index 0000000000000000000000000000000000000000..1f92bcc30603f8a164e209d89a74d7b9af9791c6
--- /dev/null
+++ b/.venv/bin/upload_theme
@@ -0,0 +1,7 @@
+#!/raid/hvtham/nvloc/pandora-removal/.venv/bin/python
+import sys
+from gradio.themes.upload_theme import main
+if __name__ == '__main__':
+    if sys.argv[0].endswith('.exe'):
+        sys.argv[0] = sys.argv[0][:-4]
+    sys.exit(main())
diff --git a/.venv/bin/uvicorn b/.venv/bin/uvicorn
new file mode 100644
index 0000000000000000000000000000000000000000..4e010e754bb901c2f09c374c42932184993434e0
--- /dev/null
+++ b/.venv/bin/uvicorn
@@ -0,0 +1,7 @@
+#!/raid/hvtham/nvloc/pandora-removal/.venv/bin/python
+import sys
+from uvicorn.main import main
+if __name__ == '__main__':
+    if sys.argv[0].endswith('.exe'):
+        sys.argv[0] = sys.argv[0][:-4]
+    sys.exit(main())
diff --git a/.venv/bin/websockets b/.venv/bin/websockets
new file mode 100644
index 0000000000000000000000000000000000000000..a07b08e0130da126e939fa57ddaf8dad46c8c013
--- /dev/null
+++ b/.venv/bin/websockets
@@ -0,0 +1,7 @@
+#!/raid/hvtham/nvloc/pandora-removal/.venv/bin/python
+import sys
+from websockets.cli import main
+if __name__ == '__main__':
+    if sys.argv[0].endswith('.exe'):
+        sys.argv[0] = sys.argv[0][:-4]
+    sys.exit(main())
diff --git a/.venv/pyvenv.cfg b/.venv/pyvenv.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..4e65d8a89456f6a4e0600962a0531e1085302443
--- /dev/null
+++ b/.venv/pyvenv.cfg
@@ -0,0 +1,7 @@
+home = /home/hvtham/.local/share/uv/python/cpython-3.12.11-linux-x86_64-gnu/bin
+implementation = CPython
+uv = 0.7.19
+version_info = 3.12.11
+include-system-site-packages = false
+seed = true
+prompt = pandora-removal
diff --git a/.venv/share/man/man1/isympy.1 b/.venv/share/man/man1/isympy.1
new file mode 100644
index 0000000000000000000000000000000000000000..0ff966158a28c5ad1a6cd954e454842b25fdd999
--- /dev/null
+++ b/.venv/share/man/man1/isympy.1
@@ -0,0 +1,188 @@
+'\" -*- coding: us-ascii -*-
+.if \n(.g .ds T< \\FC
+.if \n(.g .ds T> \\F[\n[.fam]]
+.de URL
+\\$2 \(la\\$1\(ra\\$3
+..
+.if \n(.g .mso www.tmac
+.TH isympy 1 2007-10-8 "" ""
+.SH NAME
+isympy \- interactive shell for SymPy
+.SH SYNOPSIS
+'nh
+.fi
+.ad l
+\fBisympy\fR \kx
+.if (\nx>(\n(.l/2)) .nr x (\n(.l/5)
+'in \n(.iu+\nxu
+[\fB-c\fR | \fB--console\fR] [\fB-p\fR ENCODING | \fB--pretty\fR ENCODING] [\fB-t\fR TYPE | \fB--types\fR TYPE] [\fB-o\fR ORDER | \fB--order\fR ORDER] [\fB-q\fR | \fB--quiet\fR] [\fB-d\fR | \fB--doctest\fR] [\fB-C\fR | \fB--no-cache\fR] [\fB-a\fR | \fB--auto\fR] [\fB-D\fR | \fB--debug\fR] [
+-- | PYTHONOPTIONS]
+'in \n(.iu-\nxu
+.ad b
+'hy
+'nh
+.fi
+.ad l
+\fBisympy\fR \kx
+.if (\nx>(\n(.l/2)) .nr x (\n(.l/5)
+'in \n(.iu+\nxu
+[
+{\fB-h\fR | \fB--help\fR}
+|
+{\fB-v\fR | \fB--version\fR}
+]
+'in \n(.iu-\nxu
+.ad b
+'hy
+.SH DESCRIPTION
+isympy is a Python shell for SymPy. It is just a normal python shell
+(ipython shell if you have the ipython package installed) that executes
+the following commands so that you don't have to:
+.PP
+.nf
+\*(T<
+>>> from __future__ import division
+>>> from sympy import *
+>>> x, y, z = symbols("x,y,z")
+>>> k, m, n = symbols("k,m,n", integer=True)
+ \*(T>
+.fi
+.PP
+So starting isympy is equivalent to starting python (or ipython) and
+executing the above commands by hand. It is intended for easy and quick
+experimentation with SymPy. For more complicated programs, it is recommended
+to write a script and import things explicitly (using the "from sympy
+import sin, log, Symbol, ..." idiom).
+.SH OPTIONS
+.TP
+\*(T<\fB\-c \fR\*(T>\fISHELL\fR, \*(T<\fB\-\-console=\fR\*(T>\fISHELL\fR
+Use the specified shell (python or ipython) as
+console backend instead of the default one (ipython
+if present or python otherwise).
+
+Example: isympy -c python
+
+\fISHELL\fR could be either
+\&'ipython' or 'python'
+.TP
+\*(T<\fB\-p \fR\*(T>\fIENCODING\fR, \*(T<\fB\-\-pretty=\fR\*(T>\fIENCODING\fR
+Setup pretty printing in SymPy. By default, the most pretty, unicode
+printing is enabled (if the terminal supports it). You can use less
+pretty ASCII printing instead or no pretty printing at all.
+
+Example: isympy -p no
+
+\fIENCODING\fR must be one of 'unicode',
+\&'ascii' or 'no'.
+.TP
+\*(T<\fB\-t \fR\*(T>\fITYPE\fR, \*(T<\fB\-\-types=\fR\*(T>\fITYPE\fR
+Setup the ground types for the polys. By default, gmpy ground types
+are used if gmpy2 or gmpy is installed, otherwise it falls back to python
+ground types, which are a little bit slower. You can manually
+choose python ground types even if gmpy is installed (e.g., for testing purposes).
+
+Note that sympy ground types are not supported, and should be used
+only for experimental purposes.
+
+Note that the gmpy1 ground type is primarily intended for testing; it forces
+the use of gmpy even if gmpy2 is available.
+
+This is the same as setting the environment variable
+SYMPY_GROUND_TYPES to the given ground type (e.g.,
+SYMPY_GROUND_TYPES='gmpy')
+
+The ground types can be determined interactively from the variable
+sympy.polys.domains.GROUND_TYPES inside the isympy shell itself.
+
+Example: isympy -t python
+
+\fITYPE\fR must be one of 'gmpy',
+\&'gmpy1' or 'python'.
+.TP
+\*(T<\fB\-o \fR\*(T>\fIORDER\fR, \*(T<\fB\-\-order=\fR\*(T>\fIORDER\fR
+Setup the ordering of terms for printing. The default is lex, which
+orders terms lexicographically (e.g., x**2 + x + 1). You can choose
+other orderings, such as rev-lex, which will use reverse
+lexicographic ordering (e.g., 1 + x + x**2).
+
+Note that for very large expressions, ORDER='none' may speed up
+printing considerably, with the tradeoff that the order of the terms
+in the printed expression will have no canonical order.
+
+Example: isympy -o rev-lex
+
+\fIORDER\fR must be one of 'lex', 'rev-lex', 'grlex',
+\&'rev-grlex', 'grevlex', 'rev-grevlex', 'old', or 'none'.
+.TP
+\*(T<\fB\-q\fR\*(T>, \*(T<\fB\-\-quiet\fR\*(T>
+Print only Python's and SymPy's versions to stdout at startup, and nothing else.
+.TP
+\*(T<\fB\-d\fR\*(T>, \*(T<\fB\-\-doctest\fR\*(T>
+Use the same format that should be used for doctests. This is
+equivalent to '\fIisympy -c python -p no\fR'.
+.TP
+\*(T<\fB\-C\fR\*(T>, \*(T<\fB\-\-no\-cache\fR\*(T>
+Disable the caching mechanism. Disabling the cache may slow certain
+operations down considerably. This is useful for testing the cache,
+or for benchmarking, as the cache can result in deceptive benchmark timings.
+
+This is the same as setting the environment variable SYMPY_USE_CACHE
+to 'no'.
+.TP
+\*(T<\fB\-a\fR\*(T>, \*(T<\fB\-\-auto\fR\*(T>
+Automatically create missing symbols. Normally, typing a name of a
+Symbol that has not been instantiated first would raise NameError,
+but with this option enabled, any undefined name will be
+automatically created as a Symbol. This only works in IPython 0.11.
+
+Note that this is intended only for interactive, calculator style
+usage. In a script that uses SymPy, Symbols should be instantiated
+at the top, so that it's clear what they are.
+
+This will not override any names that are already defined, which
+includes the single character letters represented by the mnemonic
+QCOSINE (see the "Gotchas and Pitfalls" document in the
+documentation). You can delete existing names by executing "del
+name" in the shell itself. You can see if a name is defined by typing
+"'name' in globals()".
+
+The Symbols that are created using this have default assumptions.
+If you want to place assumptions on symbols, you should create them
+using symbols() or var().
+
+Finally, this only works in the top level namespace. So, for
+example, if you define a function in isympy with an undefined
+Symbol, it will not work.
+.TP
+\*(T<\fB\-D\fR\*(T>, \*(T<\fB\-\-debug\fR\*(T>
+Enable debugging output. This is the same as setting the
+environment variable SYMPY_DEBUG to 'True'. The debug status is set
+in the variable SYMPY_DEBUG within isympy.
+.TP
+-- \fIPYTHONOPTIONS\fR
+These options will be passed on to \fIipython (1)\fR shell.
+Only supported when ipython is being used (standard python shell not supported).
+
+Two dashes (--) are required to separate \fIPYTHONOPTIONS\fR
+from the other isympy options.
+
+For example, to run iSymPy without startup banner and colors:
+
+isympy -q -c ipython -- --colors=NoColor
+.TP
+\*(T<\fB\-h\fR\*(T>, \*(T<\fB\-\-help\fR\*(T>
+Print help output and exit.
+.TP
+\*(T<\fB\-v\fR\*(T>, \*(T<\fB\-\-version\fR\*(T>
+Print isympy version information and exit.
+.SH FILES
+.TP
+\*(T<\fI${HOME}/.sympy\-history\fR\*(T>
+Saves the history of commands when using the python
+shell as backend.
+.SH BUGS
+The upstream's BTS can be found at \(lahttps://github.com/sympy/sympy/issues\(ra
+Please report all bugs that you find in there, this will help improve
+the overall quality of SymPy.
+.SH "SEE ALSO"
+\fBipython\fR(1), \fBpython\fR(1)
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..f288702d2fa16d3cdf0035b15a9fcbc552cd88e7
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,674 @@
+                    GNU GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+  The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works.  By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.  We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors.  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+  To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights.  Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received.  You must make sure that they, too, receive
+or can get the source code.  And you must show them these terms so they
+know their rights.
+
+  Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+  For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software.  For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+  Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so.  This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software.  The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable.  Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products.  If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+  Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. 
+ + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs.  If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.  But first, please read
+<https://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/PANDORACode/OIIctrl.py b/PANDORACode/OIIctrl.py
new file mode 100644
index 0000000000000000000000000000000000000000..6d6ed60f50e2ececa12786657665707ef339e1ca
--- /dev/null
+++ b/PANDORACode/OIIctrl.py
@@ -0,0 +1,162 @@
+
+import torch
+import torch.nn.functional as F
+import numpy as np
+
+from einops import rearrange
+
+from .OIIctrl_utils import AttentionBase
+
+
+class OIISelfAttentionControl(AttentionBase):
+    def __init__(self, start_step=25, start_layer=10, layer_idx=None, step_idx=None, total_steps=50):
+        """
+        Original Interpolate Intermediate self-attention control for the Stable Diffusion model
+        Args:
+            start_step: the step to start mutual self-attention control
+            start_layer: the layer to start mutual self-attention control
+            layer_idx: list of the layers to apply mutual self-attention control
+            step_idx: list of the steps to apply mutual self-attention control
+            total_steps: the total number of steps
+        """
+        super().__init__(total_steps)
+        self.total_steps = total_steps
+        self.start_step = start_step
+        self.start_layer = start_layer
+        self.layer_idx = layer_idx if layer_idx is not None else list(range(start_layer, 16))
+        self.step_idx = step_idx if step_idx is not None else list(range(start_step, total_steps))
+        print("step_idx: ", self.step_idx)
+        print("layer_idx: ", self.layer_idx)
+
+    def attn_batch(self, q, k, v, sim, attn, is_cross, place_in_unet, num_heads, **kwargs):
+        # Recompute attention per head over the flattened batch.
+        b = q.shape[0] // num_heads
+        q = rearrange(q, "(b h) n d -> h (b n) d", h=num_heads)
+        k = rearrange(k, "(b h) n d -> h (b n) d", h=num_heads)
+        v = rearrange(v, "(b h) n d -> h (b n) d", h=num_heads)
+
+        sim = torch.einsum("h i d, h j d -> h i j", q, k) * kwargs.get("scale")
+        attn = sim.softmax(-1)
+        out = torch.einsum("h i j, h j d -> h i d", attn, v)
+        out = rearrange(out, "h (b n) d -> b n (h d)", b=b)
+        return out
+
+    def forward(self, q, k, v, sim, attn, is_cross, place_in_unet, num_heads, **kwargs):
+        """
+        Attention forward function
+        """
+        return super().forward(q, k, v, sim, attn, is_cross, place_in_unet, num_heads, **kwargs)
+
+
+class OIISelfAttentionControlMask(OIISelfAttentionControl):
+    def __init__(self, start_step=4, start_layer=10, layer_idx=None,
+                 step_idx=None, total_steps=50,
+                 mask=None, dilated_mask=None, percentile=95):
+        """
+        Mask-guided Original Interpolate Intermediate control to alleviate the problem of fore- and background confusion
+        Args:
+            start_step: the step to start mutual self-attention control
+            start_layer: the layer to start mutual self-attention control
+            layer_idx: list of the layers to apply mutual self-attention control
+            step_idx: list of the steps to apply mutual self-attention control
+            total_steps: the total number of steps
+            mask: source object mask with shape (h, w)
+            dilated_mask: dilated version of the source mask
+            percentile: percentile used to suppress high-similarity attention entries
+        """
+        super().__init__(start_step, start_layer, layer_idx, step_idx, total_steps)
+
+        self.mask = mask
+        self.dilated_mask = dilated_mask
+        self.percentile = percentile
+
+    def attn_mask(self, q, k, v, num_heads, **kwargs):
+        B = q.shape[0] // num_heads
+        H = W = int(np.sqrt(q.shape[1]))
+
+        sim = torch.einsum("h i d, h j d -> h i j", q, k) * kwargs.get("scale")
+
+        # Resize the object mask to the attention resolution and block the masked keys.
+        mask = self.mask.clone().unsqueeze(0).unsqueeze(0)
+        mask = F.interpolate(mask, (H, W)).flatten(0).unsqueeze(0)
+        mask = mask.flatten().to(sim.dtype)
+
+        sim = sim + mask.masked_fill(mask == 1, torch.finfo(sim.dtype).min)
+
+        attn = sim.softmax(-1)
+        out = torch.einsum("h i j, h j d -> h i d", attn, v)
+        out = rearrange(out, "h (b n) d -> b n (h d)", b=B, h=num_heads)
+        return out
+
+    def CNGD_AO(self, q, k, v, num_heads, percentile=95, **kwargs):
+        B = q.shape[0] // num_heads
+        H = W = int(np.sqrt(q.shape[1]))
+
+        # Compute the similarity between q and k.
+        sim = torch.einsum("h i d, h j d -> h i j", q, k) * kwargs.get("scale")  # (heads, tokens_q, tokens_k)
+
+        # === Mask the key positions with high similarity to each query ===
+        mask_sr = self.mask.clone().unsqueeze(0).unsqueeze(0)
+        mask_sr = F.interpolate(mask_sr, (H, W)).flatten(0).unsqueeze(0)
+        mask_sr = mask_sr.flatten().to(sim.dtype)
+        sim_softmax = sim.masked_fill(mask_sr == 1, torch.finfo(sim.dtype).min).softmax(-1)  # (h, i, j), attention weights
+
+        # Percentile-based mask: flag the entries above the given percentile per query row.
+        mask = self.threshold_by_percentile(sim_softmax, percentile)
+
+        # Apply both masks to suppress the high-similarity entries and the source region.
+        sim = sim.masked_fill(mask == 1, torch.finfo(sim.dtype).min)
+        sim = sim.masked_fill(mask_sr == 1, torch.finfo(sim.dtype).min)
+
+        attn = sim.softmax(-1)  # re-normalize after masking
+        out = torch.einsum("h i j, h j d -> h i d", attn, v)
+        out = rearrange(out, "h (b n) d -> b n (h d)", b=B, h=num_heads)
+        return out
+
+    def threshold_by_percentile(self, sim_softmax, percentile):
+        """
+        Build a percentile mask for sim_softmax with shape (B*H, N, N).
+        Marks the positions whose value is >= the per-row threshold along dim=-1.
+        """
+        # Compute the percentile threshold for each query row (along dim=-1).
+        threshold = torch.quantile(sim_softmax.float(), percentile / 100.0, dim=-1, keepdim=True)  # shape: (B*H, N, 1)
+
+        # Build the mask: True where the value is >= the threshold.
+        mask = sim_softmax >= threshold  # shape is still (B*H, N, N)
+        return mask
+
+    def forward(self, q, k, v, sim, attn, is_cross, place_in_unet, num_heads, **kwargs):
+        """
+        Attention forward function
+        """
+        H = W = int(np.sqrt(q.shape[1]))
+        out_self_attn = super().forward(q, k, v, sim, attn, is_cross, place_in_unet, num_heads, **kwargs)
+        # A single-sample batch indicates the inversion pass.
+        invert = (out_self_attn.shape[0] == 1)
+
+        if invert or is_cross or self.cur_step in self.step_idx:
+            return out_self_attn
+
+        out_source, _ = out_self_attn.chunk(2)
+
+        # Recompute the target branch: object tokens attend through the
+        # percentile-masked similarity, background tokens through the mask-blocked one.
+        out_object = self.CNGD_AO(q[-num_heads:], k[:num_heads], v[:num_heads], num_heads, self.percentile, **kwargs)
+        out_background = self.attn_mask(q[-num_heads:], k[:num_heads], v[:num_heads], num_heads, **kwargs)
+
+        # Blend object and background outputs with the resized soft mask.
+        mask = self.mask.clone().to(out_object.device)
+        mask = F.interpolate(mask.unsqueeze(0).unsqueeze(0), (H, W))
+        mask = mask.reshape(-1, 1)  # (hw, 1)
+        mask = mask.clamp(0.01, 0.99)
+
+        out_target = out_object * mask + out_background * (1 - mask)
+
+        out = torch.cat([out_source, out_target], dim=0)
+
+        return out
\ No newline at end of file
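For orientation, here is a minimal sketch of how these controllers appear intended to be wired together with the other files in this diff (`OIIctrl_utils.py`, `diffuser_utils.py`, `config.py`). The checkpoint id and the toy mask are illustrative placeholders, not values taken from this repository:

```python
import torch

from PANDORACode.OIIctrl import OIISelfAttentionControlMask
from PANDORACode.OIIctrl_utils import regiter_attention_editor_diffusers, expand_mask
from PANDORACode.diffuser_utils import OIICtrlPipeline
from PANDORACode.config import Config

# Placeholder checkpoint; any SD 1.x-compatible weights should work here.
pipe = OIICtrlPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
).to(Config.DEVICE)

# Toy binary object mask (1 = region to remove); in practice this would
# come from a real segmentation of the input image.
mask = torch.zeros(64, 64)
mask[20:44, 20:44] = 1.0
dilated_mask = expand_mask(mask, scale=Config.SCALE_MASK).float()

# Class defaults for start_step/start_layer; Config supplies the shared knobs.
editor = OIISelfAttentionControlMask(
    total_steps=Config.MAX_STEP,
    mask=mask.to(Config.DEVICE),
    dilated_mask=dilated_mask.to(Config.DEVICE),
    percentile=Config.PERCENTILE,
)

# Hook every spatial attention layer of the U-Net so that subsequent
# pipeline calls route their q/k/v through the editor's forward().
regiter_attention_editor_diffusers(pipe, editor)
```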
diff --git a/PANDORACode/OIIctrl_utils.py b/PANDORACode/OIIctrl_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..a32678119ccffb4e8d32d33f9eb6c499a1f3edd9
--- /dev/null
+++ b/PANDORACode/OIIctrl_utils.py
@@ -0,0 +1,132 @@
+
+import torch
+import torch.nn as nn
+from einops import rearrange, repeat
+from torchvision.io import read_image
+import torch.nn.functional as F
+import cv2
+from PIL import Image
+import numpy as np
+import io
+from torchvision.utils import save_image, make_grid
+
+
+class AttentionBase:
+    def __init__(self, max_step=50):
+        self.cur_step = 0
+        self.num_att_layers = -1
+        self.cur_att_layer = 0
+        self.max_step = max_step
+
+    def reset(self):
+        self.cur_att_layer = 0
+        self.cur_step = 0
+
+    def after_step(self):
+        # Called once every attention layer has fired for the current
+        # denoising step: rewind the layer counter and advance the step.
+        self.cur_att_layer = 0
+        self.cur_step += 1
+        self.cur_step %= self.max_step
+        if self.cur_step == 0:
+            self.reset()
+
+    def __call__(self, q, k, v, sim, attn, is_cross, place_in_unet, num_heads, **kwargs):
+        out = self.forward(q, k, v, sim, attn, is_cross, place_in_unet, num_heads, **kwargs)
+        self.cur_att_layer += 1
+        if self.cur_att_layer == self.num_att_layers:
+            self.after_step()
+        return out
+
+    def forward(self, q, k, v, sim, attn, is_cross, place_in_unet, num_heads, **kwargs):
+        # Default behavior: plain attention, merging the heads back together.
+        out = torch.einsum('b i j, b j d -> b i d', attn, v)
+        out = rearrange(out, '(b h) n d -> b n (h d)', h=num_heads)
+        return out
+
+
+def regiter_attention_editor_diffusers(model, editor: AttentionBase):
+    """
+    Register an attention editor with a Diffusers pipeline (adapted from [Prompt-to-Prompt])
+    """
+    def ca_forward(self, place_in_unet):
+        def forward(x, encoder_hidden_states=None, attention_mask=None, context=None, mask=None):
+            """
+            The attention is similar to the original implementation of the LDM CrossAttention class
+            except for adding some modifications on the attention
+            """
+            if encoder_hidden_states is not None:
+                context = encoder_hidden_states
+            if attention_mask is not None:
+                mask = attention_mask
+
+            # `to_out` may be a ModuleList of [Linear, Dropout]; use the Linear.
+            to_out = self.to_out
+            if isinstance(to_out, nn.modules.container.ModuleList):
+                to_out = self.to_out[0]
+
+            h = self.heads
+            q = self.to_q(x)
+            is_cross = context is not None
+            context = context if is_cross else x
+
+            k = self.to_k(context)
+            v = self.to_v(context)
+            q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))
+
+            sim = torch.einsum('b i d, b j d -> b i j', q, k) * self.scale
+
+            if mask is not None:
+                mask = rearrange(mask, 'b ... -> b (...)')
+                max_neg_value = -torch.finfo(sim.dtype).max
+                mask = repeat(mask, 'b j -> (b h) () j', h=h)
+                mask = mask[:, None, :].repeat(h, 1, 1)
+                sim.masked_fill_(~mask, max_neg_value)
+
+            attn = sim.softmax(dim=-1)
+            # the only difference: route the attention through the editor
+            out = editor(
+                q, k, v, sim, attn, is_cross, place_in_unet,
+                self.heads, scale=self.scale)
+
+            return to_out(out)
+
+        return forward
+
+    def register_editor(net, count, place_in_unet):
+        for name, subnet in net.named_children():
+            if net.__class__.__name__ == 'Attention':  # spatial Transformer layer
+                net.forward = ca_forward(net, place_in_unet)
+                return count + 1
+            elif hasattr(net, 'children'):
+                count = register_editor(subnet, count, place_in_unet)
+        return count
+
+    cross_att_count = 0
+    for net_name, net in model.unet.named_children():
+        if "down" in net_name:
+            cross_att_count += register_editor(net, 0, "down")
+        elif "mid" in net_name:
+            cross_att_count += register_editor(net, 0, "mid")
+        elif "up" in net_name:
+            cross_att_count += register_editor(net, 0, "up")
+    editor.num_att_layers = cross_att_count
+
+
+def load_image(image_path, device):
+    image = read_image(image_path)
+    image = image[:3].unsqueeze_(0).float() / 127.5 - 1.  # [-1, 1]
+    image = F.interpolate(image, (512, 512))
+    image = image.to(device)
+    return image
+
+
+def expand_mask(mask, scale=0.15):
+    # Size the dilation kernel relative to the object's area.
+    object_size = torch.sum(mask)
+    kernel_size = int(torch.sqrt(object_size).item() * scale)
+    if kernel_size == 0:
+        return mask
+    source_mask_tensor = mask.clone().detach().to(torch.float32).unsqueeze(0).unsqueeze(0)  # add batch and channel dimensions
+
+    dilation = torch.ones(1, 1, kernel_size, kernel_size).to(source_mask_tensor.device)
+
+    # Any overlap with the kernel marks the pixel as part of the expanded mask.
+    expanded_mask_tensor = F.conv2d(source_mask_tensor, dilation, padding=kernel_size // 2)
+    expanded_mask_tensor = torch.where(expanded_mask_tensor > 0, torch.tensor(1.0).to(source_mask_tensor.device), torch.tensor(0.0).to(source_mask_tensor.device))
+    expanded_mask = expanded_mask_tensor.squeeze().byte()
+
+    return expanded_mask
\ No newline at end of file
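Since `expand_mask` derives its dilation kernel from the object's area, a small self-contained check (a toy mask only; nothing here comes from repository assets) makes the behavior concrete:

```python
import torch
from PANDORACode.OIIctrl_utils import expand_mask

mask = torch.zeros(64, 64)
mask[24:40, 24:40] = 1.0   # 16x16 object -> kernel_size = int(16 * 0.15) = 2

expanded = expand_mask(mask, scale=0.15)
print(int(mask.sum()), int(expanded.sum()))  # the dilated mask covers more pixels
print(expanded.shape)  # even kernel sizes grow the spatial size by one pixel
```

Note the off-by-one on even kernels: with `padding=kernel_size // 2`, `F.conv2d` returns `H + 1` rows when `kernel_size` is even, so a caller that needs shape stability would have to re-interpolate the result.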
diff --git a/PANDORACode/config.py b/PANDORACode/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..9b34486166c31d32868a957f0629d8ebef6642d2
--- /dev/null
+++ b/PANDORACode/config.py
@@ -0,0 +1,14 @@
+import torch
+class Config:
+    SEED = 10
+    SCALE_MASK = 0.1
+    THRES_HOLD = 0.25
+    DEVICE = "cuda:6" if torch.cuda.is_available() else "cpu"
+    HEIGHT = 768
+    WIDTH = 768
+    MAX_STEP = 50
+    GUIDANCE_SCALE_LADG = 1.3
+    STEP_QUERY = 45
+    LAYER_QUERY = 17
+    STEP_CHANGE_MASK = 1
+    PERCENTILE = 95
\ No newline at end of file
diff --git a/PANDORACode/diffuser_utils.py b/PANDORACode/diffuser_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..aff4efdb5c14f84522fb557db01e4b6d60efaf06
--- /dev/null
+++ b/PANDORACode/diffuser_utils.py
@@ -0,0 +1,1214 @@
+# Copyright 2025 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
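+#
+# NOTE: the `OIICtrlPipeline` defined below closely follows diffusers'
+# Stable Diffusion pipeline; the OII attention controllers from OIIctrl.py
+# appear intended to be hooked onto its U-Net via
+# `regiter_attention_editor_diffusers` in OIIctrl_utils.py.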
+import inspect +from typing import Any, Callable, Dict, List, Optional, Union +from PIL import Image +import torch +from packaging import version +from tqdm import tqdm +import torch.nn.functional as F + +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection + +from diffusers.callbacks import MultiPipelineCallbacks, PipelineCallback +from diffusers.configuration_utils import FrozenDict +from diffusers.image_processor import PipelineImageInput, VaeImageProcessor +from diffusers.loaders import FromSingleFileMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from diffusers.models import AutoencoderKL, ImageProjection, UNet2DConditionModel +from diffusers.models.lora import adjust_lora_scale_text_encoder +from diffusers.schedulers import KarrasDiffusionSchedulers +from diffusers.utils import ( + USE_PEFT_BACKEND, + deprecate, + is_torch_xla_available, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from diffusers.utils.torch_utils import randn_tensor +from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from diffusers.pipelines.stable_diffusion.pipeline_output import StableDiffusionPipelineOutput +from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker + + +if is_torch_xla_available(): + import torch_xla.core.xla_model as xm + + XLA_AVAILABLE = True +else: + XLA_AVAILABLE = False + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import StableDiffusionPipeline + + >>> pipe = StableDiffusionPipeline.from_pretrained( + ... "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16 + ... ) + >>> pipe = pipe.to("cuda") + + >>> prompt = "a photo of an astronaut riding a horse on mars" + >>> image = pipe(prompt).images[0] + ``` +""" + + +def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): + r""" + Rescales `noise_cfg` tensor based on `guidance_rescale` to improve image quality and fix overexposure. Based on + Section 3.4 from [Common Diffusion Noise Schedules and Sample Steps are + Flawed](https://huggingface.co/papers/2305.08891). + + Args: + noise_cfg (`torch.Tensor`): + The predicted noise tensor for the guided diffusion process. + noise_pred_text (`torch.Tensor`): + The predicted noise tensor for the text-guided diffusion process. + guidance_rescale (`float`, *optional*, defaults to 0.0): + A rescale factor applied to the noise predictions. + + Returns: + noise_cfg (`torch.Tensor`): The rescaled noise prediction tensor. + """ + std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) + std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) + # rescale the results from guidance (fixes overexposure) + noise_pred_rescaled = noise_cfg * (std_text / std_cfg) + # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images + noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg + return noise_cfg + + +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + r""" + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. 
Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. + sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class OIICtrlPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + StableDiffusionLoraLoaderMixin, + IPAdapterMixin, + FromSingleFileMixin, +): + """ + Pipeline for text-to-image generation using Stable Diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. 
+ text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for + more details about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + """ + + model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae" + _optional_components = ["safety_checker", "feature_extractor", "image_encoder"] + _exclude_from_cpu_offload = ["safety_checker"] + _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + image_encoder: CLIPVisionModelWithProjection = None, + requires_safety_checker: bool = True, + ): + super().__init__() + + if scheduler is not None and getattr(scheduler.config, "steps_offset", 1) != 1: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" + f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " + "to update the config accordingly as leaving `steps_offset` might led to incorrect results" + " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," + " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" + " file" + ) + deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["steps_offset"] = 1 + scheduler._internal_dict = FrozenDict(new_config) + + if scheduler is not None and getattr(scheduler.config, "clip_sample", False) is True: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." + " `clip_sample` should be set to False in the configuration file. Please make sure to update the" + " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" + " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" + " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" + ) + deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["clip_sample"] = False + scheduler._internal_dict = FrozenDict(new_config) + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. 
Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + is_unet_version_less_0_9_0 = ( + unet is not None + and hasattr(unet.config, "_diffusers_version") + and version.parse(version.parse(unet.config._diffusers_version).base_version) < version.parse("0.9.0.dev0") + ) + self._is_unet_config_sample_size_int = unet is not None and isinstance(unet.config.sample_size, int) + is_unet_sample_size_less_64 = ( + unet is not None + and hasattr(unet.config, "sample_size") + and self._is_unet_config_sample_size_int + and unet.config.sample_size < 64 + ) + if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: + deprecation_message = ( + "The configuration file of the unet has set the default `sample_size` to smaller than" + " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" + " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" + " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- stable-diffusion-v1-5/stable-diffusion-v1-5" + " \n- stable-diffusion-v1-5/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" + " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" + " in the config might lead to incorrect results in future versions. If you have downloaded this" + " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" + " the `unet/config.json` file" + ) + deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(unet.config) + new_config["sample_size"] = 64 + unet._internal_dict = FrozenDict(new_config) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." 
+ deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
+ """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + 
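# For classifier-free guidance, the unconditional image embeddings come from encoding an all-zeros image: +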
uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + image_embeds = [] + if do_classifier_free_guidance: + negative_image_embeds = [] + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." + ) + + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + + image_embeds.append(single_image_embeds[None, :]) + if do_classifier_free_guidance: + negative_image_embeds.append(single_negative_image_embeds[None, :]) + else: + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + negative_image_embeds.append(single_negative_image_embeds) + image_embeds.append(single_image_embeds) + + ip_adapter_image_embeds = [] + for i, single_image_embeds in enumerate(image_embeds): + single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) + if do_classifier_free_guidance: + single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) + + single_image_embeds = single_image_embeds.to(device=device) + ip_adapter_image_embeds.append(single_image_embeds) + + return ip_adapter_image_embeds + + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) 
instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + height, + width, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." 
+ ) + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." + ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding + def get_guidance_scale_embedding( + self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32 + ) -> torch.Tensor: + """ + See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 + + Args: + w (`torch.Tensor`): + Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings. + embedding_dim (`int`, *optional*, defaults to 512): + Dimension of the embeddings to generate. + dtype (`torch.dtype`, *optional*, defaults to `torch.float32`): + Data type of the generated embeddings. + + Returns: + `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`. + """ + assert len(w.shape) == 1 + w = w * 1000.0 + + half_dim = embedding_dim // 2 + emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) + emb = w.to(dtype)[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0, 1)) + assert emb.shape == (w.shape[0], embedding_dim) + return emb + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def guidance_rescale(self): + return self._guidance_rescale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
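+    # In code: noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)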
+    @property
+    def do_classifier_free_guidance(self):
+        return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None
+
+    @property
+    def cross_attention_kwargs(self):
+        return self._cross_attention_kwargs
+
+    @property
+    def num_timesteps(self):
+        return self._num_timesteps
+
+    @property
+    def interrupt(self):
+        return self._interrupt
+
+    @torch.no_grad()
+    @replace_example_docstring(EXAMPLE_DOC_STRING)
+    def __call__(
+        self,
+        prompt: Union[str, List[str]] = None,
+        height: Optional[int] = None,
+        width: Optional[int] = None,
+        num_inference_steps: int = 50,
+        timesteps: List[int] = None,
+        sigmas: List[float] = None,
+        guidance_scale: float = 0.0,
+        negative_prompt: Optional[Union[str, List[str]]] = None,
+        num_images_per_prompt: Optional[int] = 1,
+        eta: float = 0.0,
+        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+        latents: Optional[torch.Tensor] = None,
+        latents_intermediate: Optional[List[torch.Tensor]] = None,
+        prompt_embeds: Optional[torch.Tensor] = None,
+        negative_prompt_embeds: Optional[torch.Tensor] = None,
+        ip_adapter_image: Optional[PipelineImageInput] = None,
+        ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
+        output_type: Optional[str] = "pil",
+        return_dict: bool = True,
+        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
+        guidance_rescale: float = 0.0,
+        clip_skip: Optional[int] = None,
+        callback_on_step_end: Optional[
+            Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]
+        ] = None,
+        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
+        guidance_scale_LADG: float = 1.6,
+        local_mask=None,
+        **kwargs,
+    ):
+        r"""
+        The call function to the pipeline for generation.
+
+        Args:
+            prompt (`str` or `List[str]`, *optional*):
+                The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
+            height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
+                The height in pixels of the generated image.
+            width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
+                The width in pixels of the generated image.
+            num_inference_steps (`int`, *optional*, defaults to 50):
+                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
+                expense of slower inference.
+            timesteps (`List[int]`, *optional*):
+                Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
+                in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
+                passed will be used. Must be in descending order.
+            sigmas (`List[float]`, *optional*):
+                Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
+                their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed
+                will be used.
+            guidance_scale (`float`, *optional*, defaults to 0.0):
+                A higher guidance scale value encourages the model to generate images closely linked to the text
+                `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`;
+                this pipeline defaults to `0.0`, so classifier-free guidance is disabled unless a larger value is passed.
+            negative_prompt (`str` or `List[str]`, *optional*):
+                The prompt or prompts to guide what to not include in image generation. If not defined, you need to
+                pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
+            num_images_per_prompt (`int`, *optional*, defaults to 1):
+                The number of images to generate per prompt.
+ eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) from the [DDIM](https://huggingface.co/papers/2010.02502) paper. Only + applies to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of + IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should + contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not + provided, embeddings are computed from the `ip_adapter_image` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + guidance_rescale (`float`, *optional*, defaults to 0.0): + Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are + Flawed](https://huggingface.co/papers/2305.08891). Guidance rescale factor should fix overexposure when + using zero terminal SNR. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): + A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of + each denoising step during the inference. with the following arguments: `callback_on_step_end(self: + DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a + list of all tensors as specified by `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. 
You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. + """ + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + + if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): + callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs + + # 0. Default height and width to unet + if not height or not width: + height = ( + self.unet.config.sample_size + if self._is_unet_config_sample_size_int + else self.unet.config.sample_size[0] + ) + width = ( + self.unet.config.sample_size + if self._is_unet_config_sample_size_int + else self.unet.config.sample_size[1] + ) + height, width = height * self.vae_scale_factor, width * self.vae_scale_factor + # to deal with lora scaling and other possible forward hooks + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + height, + width, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + callback_on_step_end_tensor_inputs, + ) + + self._guidance_scale = guidance_scale + self._guidance_rescale = guidance_rescale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + self._interrupt = False + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + # 3. Encode input prompt + lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + clip_skip=self.clip_skip, + ) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + # if self.do_classifier_free_guidance: + + # prompt_embeds = torch.cat([prompt_embeds, prompt_embeds]) + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + + # 4. 
Prepare timesteps + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, num_inference_steps, device, timesteps, sigmas + ) + + # 5. Prepare latent variables + # num_channels_latents = self.unet.config.in_channels + # latents = self.prepare_latents( + # batch_size * num_images_per_prompt, + # num_channels_latents, + # height, + # width, + # prompt_embeds.dtype, + # device, + # generator, + # latents, + # ) + if local_mask is not None: + local_mask = local_mask.unsqueeze(0).unsqueeze(0) + local_mask = F.interpolate(local_mask, size=(latents.shape[-2], latents.shape[-1]), mode='nearest') + latents = latents * self.scheduler.init_noise_sigma + latents_intermediate[-1] = latents_intermediate[-1] * self.scheduler.init_noise_sigma + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 6.1 Add image embeds for IP-Adapter + added_cond_kwargs = ( + {"image_embeds": image_embeds} + if (ip_adapter_image is not None or ip_adapter_image_embeds is not None) + else None + ) + + # 6.2 Optionally get Guidance Scale Embedding + timestep_cond = None + if self.unet.config.time_cond_proj_dim is not None: + guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) + timestep_cond = self.get_guidance_scale_embedding( + guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim + ).to(device=device, dtype=latents.dtype) + + # 7. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + self._num_timesteps = len(timesteps) + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + if self.interrupt: + continue + + # expand the latents if we are doing classifier free guidance + # latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + + latent_model_input=torch.cat([latents_intermediate[-i-1],latents[1:]]) + latents=latent_model_input.clone() + # latent_model_input=latents + + if hasattr(self.scheduler, "scale_model_input"): + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + timestep_cond=timestep_cond, + cross_attention_kwargs=self.cross_attention_kwargs, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + # breakpoint() + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) + + if self.do_classifier_free_guidance and self.guidance_rescale > 0.0: + # Based on 3.4. 
in https://huggingface.co/papers/2305.08891
+                    noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale)
+
+                if local_mask is not None and guidance_scale_LADG > 0.0:
+                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
+                    noise_pred = (noise_pred_uncond + guidance_scale_LADG * (noise_pred_text - noise_pred_uncond)) * local_mask \
+                        + noise_pred_text * (1 - local_mask)
+                # compute the previous noisy sample x_t -> x_t-1
+                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
+
+                if callback_on_step_end is not None:
+                    callback_kwargs = {}
+                    for k in callback_on_step_end_tensor_inputs:
+                        callback_kwargs[k] = locals()[k]
+                    callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
+
+                    latents = callback_outputs.pop("latents", latents)
+                    prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
+                    negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
+
+                # call the callback, if provided
+                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
+                    progress_bar.update()
+                    if callback is not None and i % callback_steps == 0:
+                        step_idx = i // getattr(self.scheduler, "order", 1)
+                        callback(step_idx, t, latents)
+
+                if XLA_AVAILABLE:
+                    xm.mark_step()
+
+        if not output_type == "latent":
+            image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[0]
+            image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
+        else:
+            image = latents
+            has_nsfw_concept = None
+
+        if has_nsfw_concept is None:
+            do_denormalize = [True] * image.shape[0]
+        else:
+            do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
+
+        image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
+
+        # Offload all models
+        self.maybe_free_model_hooks()
+
+        if not return_dict:
+            return (image, has_nsfw_concept)
+
+        return image
+
+    @torch.no_grad()
+    def invert(
+        self,
+        prompt: Union[str, List[str]],
+        image: Image,
+        num_inference_steps: int = 50,
+        guidance_scale: float = 0.0,
+        negative_prompt: Optional[Union[str, List[str]]] = None,
+        generator: Optional[torch.Generator] = None,
+        eta: float = 0.0,
+        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
+        clip_skip: Optional[int] = None,
+    ):
+        # ... (setup and prompt encoding are the same as in `__call__`) ...
+        # 1. Set up the basic parameters
+        device = self._execution_device
+        self._guidance_scale = guidance_scale
+        self._clip_skip = clip_skip
+        self._cross_attention_kwargs = cross_attention_kwargs
+
+        if eta != 0.0:
+            raise ValueError("For inversion, `eta` must be 0.0.")
+
+        # 2. Encode the input prompt
+        prompt_embeds, negative_prompt_embeds = self.encode_prompt(
+            prompt,
+            device,
+            1,  # num_images_per_prompt
+            self.do_classifier_free_guidance,
+            negative_prompt,
+            clip_skip=self.clip_skip,
+        )
+        if self.do_classifier_free_guidance:
+            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
+
+        # 3. Prepare the timesteps
+        self.scheduler.set_timesteps(num_inference_steps, device=device)
+        timesteps = self.scheduler.timesteps
+
+        image = image.convert("RGB")
+        # 4. Prepare the latent from the source image (x_0)
+        image_tensor = self.image_processor.preprocess(image).to(device=device, dtype=prompt_embeds.dtype)
+        latent = self.vae.encode(image_tensor).latent_dist.sample(generator)
+        latent = self.vae.config.scaling_factor * latent
+
+        latents = []
+        # 5. Reverse DDIM loop (progressively adds noise)
+        for i, t in enumerate(tqdm(timesteps.flip(0))):
+            if i >= num_inference_steps - 1:
+                t_next = torch.tensor(self.scheduler.config.num_train_timesteps - 1, device=device)
+            else:
+                t_next = timesteps.flip(0)[i + 1]
+
+            latent_model_input = torch.cat([latent] * 2) if self.do_classifier_free_guidance else latent
+            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
+
+            # The UNet output is now a `v_pred` (v-prediction parametrization)
+            v_pred = self.unet(
+                latent_model_input,
+                t,
+                encoder_hidden_states=prompt_embeds,
+                cross_attention_kwargs=self.cross_attention_kwargs,
+            ).sample
+
+            if self.do_classifier_free_guidance:
+                v_pred_uncond, v_pred_text = v_pred.chunk(2)
+                v_pred = v_pred_uncond + self.guidance_scale * (v_pred_text - v_pred_uncond)
+
+            # ==================== KEY CHANGE FOR v-PREDICTION ====================
+            # Fetch the cumulative alpha values from the scheduler
+            alpha_prod_t = self.scheduler.alphas_cumprod[t]
+            beta_prod_t = 1 - alpha_prod_t
+
+            # Use the v-prediction formulas to recover the predicted x0 and epsilon
+            pred_original_sample = alpha_prod_t.sqrt() * latent - beta_prod_t.sqrt() * v_pred
+            pred_epsilon = alpha_prod_t.sqrt() * v_pred + beta_prod_t.sqrt() * latent
+            # =====================================================================
+
+            # Compute x_{t+1} from the derived x0 and epsilon
+            alpha_prod_t_next = self.scheduler.alphas_cumprod[t_next]
+            latent = alpha_prod_t_next.sqrt() * pred_original_sample + (1 - alpha_prod_t_next).sqrt() * pred_epsilon
+
+            latents.append(latent)
+
+        return latents, latent
diff --git a/README.md b/README.md
index 0116efe0add25c74f20941e39e7f2d7f64cf92bb..714234d926b9e4f67a334af6dd75800f30555809 100644
--- a/README.md
+++ b/README.md
@@ -1,12 +1,253 @@
 ---
-title: Pandora Removal
-emoji: 🏆
-colorFrom: gray
-colorTo: gray
+title: pandora-removal
+app_file: app.py
 sdk: gradio
 sdk_version: 5.49.1
-app_file: app.py
-pinned: false
+---
+# 🎨 PANDORA: Object Removal via Diffusion Models
+
+A professional implementation of PANDORA (Prompt-Agnostic Novel Diffusion Object Removal Algorithm), a state-of-the-art method for intelligently removing objects from images using diffusion models with attention control.
+ +[![Python 3.8+](https://img.shields.io/badge/python-3.8+-blue.svg)](https://www.python.org/downloads/) +[![PyTorch](https://img.shields.io/badge/PyTorch-2.0+-red.svg)](https://pytorch.org/) +[![License](https://img.shields.io/badge/license-MIT-green.svg)](LICENSE) + +## ✨ Features + +- 🚀 **Easy-to-use API**: Simple Python interface for object removal +- 🎯 **High-quality results**: State-of-the-art diffusion-based inpainting +- 🔧 **Flexible configuration**: Customizable parameters for different use cases +- 🌐 **HuggingFace demo**: Interactive Gradio interface for quick testing +- 📦 **Batch processing**: Process multiple images efficiently +- 🎓 **Well-documented**: Comprehensive documentation and examples + +## 🔧 Installation + +### Prerequisites + +- Python 3.8 or higher +- CUDA-capable GPU (recommended) or CPU +- PyTorch 2.0 or higher + +### Install from source + +```bash +# Clone the repository +git clone https://github.com/yourusername/pandora-removal.git +cd pandora-removal + +# Install dependencies +pip install -r requirements.txt + +# Install the package +pip install -e . +``` + +## 🎨 Demo + +Try the interactive web demo: + +```bash +python app.py +# Or use: ./run_demo.sh (Unix) or run_demo.bat (Windows) +``` + +Then open `http://localhost:7860` in your browser! + +The demo features a professional MimicBrush-style interface with: +- Interactive mask drawing +- Real-time parameter adjustment +- Seed control for reproducibility +- Step-by-step tutorial + +## 🚀 Quick Start + +### Basic Usage + +```python +from src.pandora_removal import PandoraRemoval, PandoraConfig +from PIL import Image + +# Initialize the model +config = PandoraConfig(device="cuda") +model = PandoraRemoval(config=config) +model.load_model() + +# Load image and mask +image = Image.open("input.jpg") +mask = Image.open("mask.png") # White pixels indicate object to remove + +# Remove object +result = model.remove_object( + image=image, + mask=mask, + border_size=17 +) + +# Save result +result.save("output.png") +``` + +### Using the Gradio Demo + +Launch the interactive demo: + +```bash +python app.py +``` + +Then open your browser to `http://localhost:7860` to use the web interface. 
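+
+### Creating a Mask Programmatically
+
+If you don't have a mask image yet, you can build one in code. The snippet below is a minimal sketch using NumPy and Pillow (both already in `requirements.txt`); the rectangle coordinates are placeholders you would replace with the region covering your object. Remember the convention used throughout this project: white pixels mark the object to remove, black pixels are kept.
+
+```python
+import numpy as np
+from PIL import Image
+
+# Start from an all-black ("keep everything") mask matching the input image size
+image = Image.open("input.jpg").convert("RGB")
+mask_array = np.zeros((image.height, image.width), dtype=np.uint8)
+
+# Paint the region to remove in white (255); coordinates are illustrative
+top, bottom, left, right = 100, 300, 150, 350
+mask_array[top:bottom, left:right] = 255
+
+Image.fromarray(mask_array, mode="L").save("mask.png")
+```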
+
+### Batch Processing
+
+Process multiple images at once:
+
+```python
+from src.pandora_removal import PandoraRemoval, PandoraConfig
+
+config = PandoraConfig(device="cuda")
+model = PandoraRemoval(config=config)
+model.load_model()
+
+model.batch_process(
+    images_dir="path/to/images",
+    masks_dir="path/to/masks",
+    output_dir="path/to/output",
+    border_size=17
+)
+```
+
+## 📖 Configuration
+
+Customize the model behavior using `PandoraConfig`:
+
+```python
+from src.pandora_removal import PandoraConfig
+
+config = PandoraConfig(
+    model_path="stabilityai/stable-diffusion-2-1",  # HuggingFace model or local path
+    device="cuda",            # "cuda" or "cpu"
+    height=512,               # Image height (default: 768)
+    width=512,                # Image width (default: 768)
+    max_steps=50,             # Diffusion steps
+    guidance_scale_ladg=1.3,  # LADG guidance scale (1.0-2.0 works best)
+    percentile=95.0,          # Attention threshold
+)
+```
+
+### Key Parameters
+
+| Parameter             | Description                     | Default                              |
+| --------------------- | ------------------------------- | ------------------------------------ |
+| `model_path`          | HuggingFace model or local path | `"stabilityai/stable-diffusion-2-1"` |
+| `device`              | Device for inference            | `"cuda"`                             |
+| `height` / `width`    | Output image dimensions         | `768`                                |
+| `max_steps`           | Number of diffusion steps       | `50`                                 |
+| `guidance_scale_ladg` | Guidance scale for generation   | `1.3`                                |
+| `percentile`          | Attention control threshold     | `95.0`                               |
+| `border_size`         | Mask dilation size              | `17`                                 |
+
+## 📚 Examples
+
+See the `examples/` directory for complete examples:
+
+- `basic_removal.py`: Simple object removal
+- `batch_processing.py`: Processing multiple images
+- `custom_config.py`: Using custom configurations
+
+## 🏗️ Project Structure
+
+```
+pandora-removal/
+├── src/
+│   └── pandora_removal/
+│       ├── __init__.py       # Package initialization
+│       ├── config.py         # Configuration classes
+│       ├── inference.py      # Main inference pipeline
+│       └── utils.py          # Utility functions
+├── PANDORACode/              # Core PANDORA implementation
+├── app.py                    # Gradio demo application
+├── examples/                 # Usage examples
+├── raw_code.py               # Original implementation
+├── requirements.txt          # Python dependencies
+└── README.md                 # This file
+```
+
+## 🎯 How It Works
+
+PANDORA uses a novel approach combining:
+
+1. **Diffusion Model Inversion**: Inverts the input image to latent space
+2. **Attention Control**: Manipulates self-attention to remove objects
+3. **Localized Generation**: Uses masks to guide object removal
+4. **Background Preservation**: Maintains the surrounding context
+
+The process:
+
+1. Load the image and a binary mask (white = remove, black = keep)
+2. Invert the image into the diffusion model's latent space (see the inversion sketch below)
+3. Apply masked attention control during denoising
+4. Generate the final image with the object removed
+
+## 🔬 Advanced Usage
+
+### Custom Attention Control
+
+```python
+from src.pandora_removal import PandoraConfig
+
+config = PandoraConfig(
+    step_query=10,    # Start attention control at step 10
+    layer_query=10,   # Start at layer 10
+    percentile=90.0,  # Attention threshold
+)
+```
+
+### Different Border Sizes
+
+Different object types may need different border sizes:
+
+```python
+# Small, well-defined objects
+result = model.remove_object(image, mask, border_size=2)
+
+# Large or complex objects
+result = model.remove_object(image, mask, border_size=22)
+
+# Default for most cases
+result = model.remove_object(image, mask, border_size=17)
+```
+
+## 🤝 Contributing
+
+Contributions are welcome! Please feel free to submit a Pull Request.
+
+## 📄 License
+
+This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
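+
+## 🧮 Inversion Sketch
+
+For reference, the bundled pipeline's `invert()` method runs DDIM in reverse under a v-prediction model: at each timestep it converts the UNet's `v` output into a predicted clean latent and a predicted noise, then steps the latent toward the next, noisier timestep. The helper below is a minimal sketch that mirrors the update used in the pipeline code; `alphas_cumprod` is the scheduler's cumulative-alpha table, and the tensor arguments are assumed to be prepared exactly as in `invert()`.
+
+```python
+import torch
+
+def ddim_inversion_step(latent, v_pred, alphas_cumprod, t, t_next):
+    """One reverse-DDIM step for a v-prediction model (mirrors the pipeline's invert())."""
+    alpha_t, alpha_next = alphas_cumprod[t], alphas_cumprod[t_next]
+    # Recover the predicted clean latent x0 and the predicted noise epsilon from v
+    pred_x0 = alpha_t.sqrt() * latent - (1 - alpha_t).sqrt() * v_pred
+    pred_eps = alpha_t.sqrt() * v_pred + (1 - alpha_t).sqrt() * latent
+    # Re-noise x0 to the next (noisier) timestep
+    return alpha_next.sqrt() * pred_x0 + (1 - alpha_next).sqrt() * pred_eps
+```
+
+The intermediate latents collected this way are replayed during denoising (`latents_intermediate` in the pipeline's `__call__`), which is what lets the unmasked background track the original image while the masked region is regenerated.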
+ +## 📝 Citation + +If you use this code in your research, please cite: + +```bibtex +@article{pandora2024, + title={PANDORA: Prompt-Agnostic Novel Diffusion Object Removal Algorithm}, + author={Your Name}, + journal={arXiv preprint arXiv:xxxx.xxxxx}, + year={2024} +} +``` + +## 🙏 Acknowledgments + +- Built on [Stable Diffusion](https://github.com/Stability-AI/stablediffusion) +- Uses [Diffusers](https://github.com/huggingface/diffusers) library +- Powered by [PyTorch](https://pytorch.org/) + +## 📧 Contact + +For questions or issues, please open an issue on GitHub or contact [your-email@example.com](mailto:your-email@example.com). + --- -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference +**Note**: The `raw_code.py` file contains the original implementation and is kept for reference purposes. diff --git a/app.py b/app.py new file mode 100644 index 0000000000000000000000000000000000000000..54098e6af54c317e2818810625414d8333e209b7 --- /dev/null +++ b/app.py @@ -0,0 +1,231 @@ +import gradio as gr +import torch +from PIL import Image +import numpy as np + +from src.pandora_removal import PandoraRemoval, PandoraConfig + + +# Initialize the model +@torch.no_grad() +def initialize_model(): + """Initialize PANDORA model with default config.""" + config = PandoraConfig( + device="cuda" # Force GPU usage, no CPU fallback + ) + model = PandoraRemoval(config=config) + model.load_model() + return model + + +# Global model instance +print("Loading PANDORA model...") +model = initialize_model() +print("✓ Model loaded successfully!") + + +def process_image( + image_dict: dict, + border_size: int, + guidance_scale: float, + num_steps: int, + seed: int +) -> list[Image.Image]: + """Process image with mask using PANDORA. + + Args: + image_dict: Dictionary from ImageEditor with 'background' and 'layers' + border_size: Border dilation size + guidance_scale: Guidance scale for generation + num_steps: Number of diffusion steps + seed: Random seed for reproducibility + + Returns: + List containing output image with object removed + """ + if image_dict is None: + raise ValueError("Please upload an image") + + # Extract source image + source_image = image_dict.get('background') + if source_image is None: + raise ValueError("No image found") + + # Handle both PIL Image and numpy array + if isinstance(source_image, np.ndarray): + source_image = Image.fromarray(source_image).convert("RGB") + elif isinstance(source_image, Image.Image): + source_image = source_image.convert("RGB") + else: + raise ValueError(f"Unexpected image type: {type(source_image)}") + + # Extract mask from layers + layers = image_dict.get('layers', []) + if not layers or len(layers) == 0: + raise ValueError("Please draw a mask on the object you want to remove") + + # Get the first layer (mask) + mask_array = layers[0] + if isinstance(mask_array, np.ndarray): + mask_image = Image.fromarray(mask_array).convert('L') + elif isinstance(mask_array, Image.Image): + mask_image = mask_array.convert('L') + else: + raise ValueError(f"Unexpected mask type: {type(mask_array)}") + + # Convert mask to tensor + from torchvision import transforms + mask_tensor = transforms.ToTensor()(mask_image).to(model.device) + + # Set seed if specified + if seed != -1: + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed(seed) + + # Process with PANDORA + result = model.remove_object( + image=source_image, + mask=mask_tensor, + border_size=border_size, + guidance_scale=guidance_scale, + 
num_steps=num_steps + ) + + return [result] + + +# Create Gradio interface with MimicBrush-style layout +with gr.Blocks(title="PANDORA Object Removal") as demo: + with gr.Row(): + with gr.Column(): + gr.Markdown("# PANDORA: Zero-shot Object Removal via Diffusion Models") + + with gr.Row(): + output_gallery = gr.Gallery( + label='Output', + show_label=True, + elem_id="gallery", + columns=1, + height=768 + ) + + with gr.Accordion("Advanced Options", open=True): + border_size = gr.Slider( + label="Border Size", + minimum=0, + maximum=50, + value=17, + step=1, + info="Size of border around mask" + ) + num_steps = gr.Slider( + label="Steps", + minimum=20, + maximum=100, + value=50, + step=1 + ) + guidance_scale = gr.Slider( + label="Guidance Scale (LADG)", + minimum=0.0, + maximum=15.0, + value=1.3, # Optimal value from PANDORACode config + step=0.1, + info="Lower values (1.0-2.0) work best for object removal" + ) + seed = gr.Slider( + label="Seed", + minimum=-1, + maximum=999999999, + step=1, + value=-1, + info="Set to -1 for random" + ) + + gr.Markdown("### Tutorial") + gr.Markdown("1. Upload an image") + gr.Markdown("2. Use the draw button to mask the object you want to remove") + gr.Markdown("3. Adjust parameters if needed") + gr.Markdown("4. Click 'Remove Object' to generate") + + gr.Markdown("### Tips") + gr.Markdown("- Draw the mask slightly larger than the object") + gr.Markdown("- Border size: 2-5 for small objects, 15-20 for medium, 20-30 for large objects") + gr.Markdown("- Guidance Scale (LADG): Keep around 1.0-2.0 for best results (default 1.3)") + gr.Markdown("- Images are automatically resized to 768x768 for processing") + + with gr.Column(): + gr.Markdown("# Upload your image") + gr.Markdown("### Tips: You can adjust the brush size") + + input_image = gr.ImageEditor( + label="Source Image", + type="pil", + brush=gr.Brush(colors=["#FFFFFF"], default_size=30, color_mode="fixed"), + layers=False, + interactive=True, + height=512 + ) + + run_button = gr.Button(value="Remove Object", variant="primary", size="lg") + + # Examples section (add example images to demo_examples/ directory) + with gr.Row(): + gr.Examples( + examples=[ + ["./demo_examples/img_1_original.png", 17, 7.5, 50, -1], + ["./demo_examples/img_2_original.png", 17, 7.5, 50, -1], + ["./demo_examples/img_3_original.png", 17, 7.5, 50, -1], + ["./demo_examples/img_4_original.png", 17, 7.5, 50, -1], + ], + inputs=[ + input_image, + border_size, + guidance_scale, + num_steps, + seed + ], + cache_examples=False, + examples_per_page=10 + ) + + # Footer + gr.Markdown( + """ + --- + ### About PANDORA + + PANDORA is a state-of-the-art object removal method using diffusion models with attention control. + It intelligently removes objects from images while preserving the background and generating plausible content. + + **Key Features:** + - Zero-shot object removal (no training required) + - Attention-controlled diffusion + - High-quality background reconstruction + - Flexible control parameters + + **Citation:** If you use this work, please cite our paper. 
+ """ + ) + + # Event handler + run_button.click( + fn=process_image, + inputs=[ + input_image, + border_size, + guidance_scale, + num_steps, + seed + ], + outputs=output_gallery + ) + + +if __name__ == "__main__": + demo.launch( + share=True, + server_name="0.0.0.0", + server_port=7860 + ) diff --git a/demo_examples/img_1_original.png b/demo_examples/img_1_original.png new file mode 100644 index 0000000000000000000000000000000000000000..ba28da0f2bf06705744cf4aea43ef28f06cdf62e Binary files /dev/null and b/demo_examples/img_1_original.png differ diff --git a/demo_examples/img_2_original.png b/demo_examples/img_2_original.png new file mode 100644 index 0000000000000000000000000000000000000000..bd981872180a77434e62fd3ba6dfd5cf4788236f --- /dev/null +++ b/demo_examples/img_2_original.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1ee7425695b50da2c1fd7aee65e61e737db05118f977356d15b158847259c28d +size 155904 diff --git a/demo_examples/img_3_original.png b/demo_examples/img_3_original.png new file mode 100644 index 0000000000000000000000000000000000000000..509aeaac0712e935795f44677eb6b65f1f2f662e Binary files /dev/null and b/demo_examples/img_3_original.png differ diff --git a/demo_examples/img_4_original.png b/demo_examples/img_4_original.png new file mode 100644 index 0000000000000000000000000000000000000000..04c49251adfc0279e7da6f9c9cb0a0e3574941ac Binary files /dev/null and b/demo_examples/img_4_original.png differ diff --git a/examples/basic_removal.py b/examples/basic_removal.py new file mode 100644 index 0000000000000000000000000000000000000000..5d06d23ba88412337ec73587ce94c90a657be16d --- /dev/null +++ b/examples/basic_removal.py @@ -0,0 +1,69 @@ +"""Basic example of using PANDORA for object removal.""" + +from pathlib import Path +from PIL import Image + +from src.pandora_removal import PandoraRemoval, PandoraConfig + + +def main(): + """Demonstrate basic object removal.""" + + # Configure the model + config = PandoraConfig( + model_path="stabilityai/stable-diffusion-2-1", + device="cuda", # Change to "cpu" if no GPU available + max_steps=50, + guidance_scale_ladg=7.5 + ) + + # Initialize and load model + print("Initializing PANDORA model...") + model = PandoraRemoval(config=config) + model.load_model() + print("✓ Model loaded successfully!") + + # Define input and output paths + image_path = "path/to/your/image.jpg" + mask_path = "path/to/your/mask.png" + output_path = "output/result.png" + + # Check if files exist + if not Path(image_path).exists(): + print(f"❌ Image not found: {image_path}") + print("Please update the image_path variable with your image path.") + return + + if not Path(mask_path).exists(): + print(f"❌ Mask not found: {mask_path}") + print("Please update the mask_path variable with your mask path.") + return + + # Load images + print("\nLoading image and mask...") + image = Image.open(image_path).convert("RGB") + mask = Image.open(mask_path).convert("L") + + print(f"Image size: {image.size}") + print(f"Mask size: {mask.size}") + + # Remove object + print("\nRemoving object...") + result = model.remove_object( + image=image, + mask=mask, + border_size=17, # Adjust based on object size + guidance_scale=7.5, + num_steps=50 + ) + + # Save result + Path(output_path).parent.mkdir(parents=True, exist_ok=True) + result.save(output_path) + print(f"✓ Result saved to: {output_path}") + + +if __name__ == "__main__": + main() + + diff --git a/examples/batch_processing.py b/examples/batch_processing.py new file mode 100644 index 
0000000000000000000000000000000000000000..53d46c61b022298f53ff4644790acf0fe2aa8d45 --- /dev/null +++ b/examples/batch_processing.py @@ -0,0 +1,72 @@ +"""Example of batch processing multiple images with PANDORA.""" + +import os +from pathlib import Path + +from src.pandora_removal import PandoraRemoval, PandoraConfig + + +def main(): + """Demonstrate batch processing of multiple images.""" + + # Configure the model + config = PandoraConfig( + model_path="stabilityai/stable-diffusion-2-1", + device="cuda", + max_steps=50, + guidance_scale_ladg=7.5 + ) + + # Initialize and load model + print("Initializing PANDORA model...") + model = PandoraRemoval(config=config) + model.load_model() + print("✓ Model loaded successfully!") + + # Define dataset structure + # Expected structure: + # dataset/ + # ├── Images/ + # │ ├── 001.jpg + # │ ├── 002.jpg + # │ └── ... + # └── Masks/ + # ├── 001.png + # ├── 002.png + # └── ... + + dataset_path = "path/to/your/dataset" + images_dir = os.path.join(dataset_path, "Images") + masks_dir = os.path.join(dataset_path, "Masks") + output_dir = "output/batch_results" + + # Check if directories exist + if not Path(images_dir).exists(): + print(f"❌ Images directory not found: {images_dir}") + print("Please update the dataset_path variable.") + return + + if not Path(masks_dir).exists(): + print(f"❌ Masks directory not found: {masks_dir}") + print("Please update the dataset_path variable.") + return + + # Process batch + print(f"\nProcessing images from: {images_dir}") + print(f"Using masks from: {masks_dir}") + print(f"Saving results to: {output_dir}\n") + + model.batch_process( + images_dir=images_dir, + masks_dir=masks_dir, + output_dir=output_dir, + border_size=17 + ) + + print("\n✓ Batch processing complete!") + + +if __name__ == "__main__": + main() + + diff --git a/examples/custom_config.py b/examples/custom_config.py new file mode 100644 index 0000000000000000000000000000000000000000..f9ce85ae7a3f9057005186efac5f5008864cbece --- /dev/null +++ b/examples/custom_config.py @@ -0,0 +1,142 @@ +"""Example of using custom configurations with PANDORA.""" + +from PIL import Image + +from src.pandora_removal import PandoraRemoval, PandoraConfig + + +def example_high_quality(): + """Use higher quality settings for better results (slower).""" + + config = PandoraConfig( + model_path="stabilityai/stable-diffusion-2-1", + device="cuda", + max_steps=100, # More steps for better quality + guidance_scale_ladg=10.0, # Higher guidance + percentile=95.0, # Stricter attention control + ) + + model = PandoraRemoval(config=config) + model.load_model() + + result = model.remove_object( + image="input.jpg", + mask="mask.png", + border_size=20, # Larger border + num_steps=100 + ) + + result.save("output_high_quality.png") + print("✓ High-quality result saved!") + + +def example_fast_inference(): + """Use faster settings for quick results (lower quality).""" + + config = PandoraConfig( + model_path="stabilityai/stable-diffusion-2-1", + device="cuda", + max_steps=25, # Fewer steps for speed + guidance_scale_ladg=5.0, # Lower guidance + ) + + model = PandoraRemoval(config=config) + model.load_model() + + result = model.remove_object( + image="input.jpg", + mask="mask.png", + border_size=10, # Smaller border + num_steps=25 + ) + + result.save("output_fast.png") + print("✓ Fast result saved!") + + +def example_different_object_types(): + """Different settings for different object types.""" + + model = PandoraRemoval() + model.load_model() + + # Small, well-defined objects (e.g., text, logos) + 
+    result1 = model.remove_object(
+        image="image_with_text.jpg",
+        mask="text_mask.png",
+        border_size=2,  # Small border for crisp edges
+        guidance_scale=10.0
+    )
+    result1.save("output_text_removed.png")
+
+    # Large, complex objects (e.g., people, buildings)
+    result2 = model.remove_object(
+        image="image_with_person.jpg",
+        mask="person_mask.png",
+        border_size=22,  # Large border for smooth blending
+        guidance_scale=7.5
+    )
+    result2.save("output_person_removed.png")
+
+    # Medium objects (default settings)
+    result3 = model.remove_object(
+        image="image_with_object.jpg",
+        mask="object_mask.png",
+        border_size=17,  # Standard border
+        guidance_scale=7.5
+    )
+    result3.save("output_object_removed.png")
+
+    print("✓ All results saved!")
+
+
+def example_cpu_inference():
+    """Run inference on CPU (no GPU required)."""
+
+    config = PandoraConfig(
+        model_path="stabilityai/stable-diffusion-2-1",
+        device="cpu",  # Use CPU
+        max_steps=50,
+    )
+
+    model = PandoraRemoval(config=config)
+    model.load_model()
+
+    result = model.remove_object(
+        image="input.jpg",
+        mask="mask.png",
+        border_size=17
+    )
+
+    result.save("output_cpu.png")
+    print("✓ CPU inference result saved!")
+
+
+def main():
+    """Run all examples."""
+    print("Choose an example to run:")
+    print("1. High-quality inference")
+    print("2. Fast inference")
+    print("3. Different object types")
+    print("4. CPU inference")
+
+    choice = input("\nEnter choice (1-4): ")
+
+    examples = {
+        "1": example_high_quality,
+        "2": example_fast_inference,
+        "3": example_different_object_types,
+        "4": example_cpu_inference,
+    }
+
+    example_func = examples.get(choice)
+    if example_func:
+        example_func()
+    else:
+        print("Invalid choice!")
+
+
+if __name__ == "__main__":
+    main()
+
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..29a6e1d499371fccbecb9d62323e69040f008b33
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,13 @@
+torch>=2.0.0
+torchvision>=0.15.0
+diffusers>=0.21.0
+transformers>=4.30.0
+accelerate>=0.20.0
+Pillow>=10.0.0
+opencv-python>=4.8.0
+numpy>=1.24.0
+gradio>=4.0.0
+tqdm>=4.65.0
+scipy>=1.10.0
+einops>=0.7.0
+
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000000000000000000000000000000000000..d0629bca1492d604b02306a77205d185a0fcfe3b
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,62 @@
+"""Setup configuration for PANDORA Object Removal."""
+
+from setuptools import setup, find_packages
+from pathlib import Path
+
+# Read the README file
+this_directory = Path(__file__).parent
+long_description = (this_directory / "README.md").read_text(encoding="utf-8")
+
+setup(
+    name="pandora-removal",
+    version="0.1.0",
+    author="Your Name",
+    author_email="your.email@example.com",
+    description="PANDORA: Object Removal via Diffusion Models",
+    long_description=long_description,
+    long_description_content_type="text/markdown",
+    url="https://github.com/yourusername/pandora-removal",
+    package_dir={"": "src"},
+    packages=find_packages(where="src"),
+    classifiers=[
+        "Development Status :: 3 - Alpha",
+        "Intended Audience :: Developers",
+        "Intended Audience :: Science/Research",
+        "License :: OSI Approved :: MIT License",
+        "Programming Language :: Python :: 3",
+        "Programming Language :: Python :: 3.8",
+        "Programming Language :: Python :: 3.9",
+        "Programming Language :: Python :: 3.10",
+        "Programming Language :: Python :: 3.11",
+        "Topic :: Scientific/Engineering :: Artificial Intelligence",
+        "Topic :: Scientific/Engineering :: Image Processing",
+    ],
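+    # NOTE: the src/ modules use PEP 604 "X | Y" annotations; each module
+    # enables them via `from __future__ import annotations`, which keeps the
+    # package importable on Python 3.8/3.9.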
+    python_requires=">=3.8",
+    install_requires=[
+        "torch>=2.0.0",
+        "torchvision>=0.15.0",
+        "diffusers>=0.21.0",
+        "transformers>=4.30.0",
+        "accelerate>=0.20.0",
+        "Pillow>=10.0.0",
+        "numpy>=1.24.0",
+        "tqdm>=4.65.0",
+        "scipy>=1.10.0",
+    ],
+    extras_require={
+        "demo": ["gradio>=4.0.0"],
+        "dev": [
+            "pytest>=7.4.0",
+            "black>=23.7.0",
+            "flake8>=6.1.0",
+            "mypy>=1.5.0",
+        ],
+    },
+    entry_points={
+        "console_scripts": [
+            "pandora-remove=pandora_removal.cli:main",
+        ],
+    },
+)
+
diff --git a/src/pandora_removal/__init__.py b/src/pandora_removal/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..005c96e6bbf0d2f987aec11660ed9fbcdeba9faa
--- /dev/null
+++ b/src/pandora_removal/__init__.py
@@ -0,0 +1,12 @@
+"""PANDORA: Object Removal via Diffusion Models.
+
+A professional implementation of the PANDORA object removal pipeline.
+"""
+
+from .inference import PandoraRemoval
+from .config import PandoraConfig
+
+__version__ = "0.1.0"
+__all__ = ["PandoraRemoval", "PandoraConfig"]
+
diff --git a/src/pandora_removal/config.py b/src/pandora_removal/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..df9ba7018c77590827667f70dd16af845b17f09d
--- /dev/null
+++ b/src/pandora_removal/config.py
@@ -0,0 +1,38 @@
+"""Configuration for PANDORA object removal."""
+
+from dataclasses import dataclass
+from typing import Literal
+
+
+@dataclass
+class PandoraConfig:
+    """Configuration class for PANDORA object removal.
+
+    Attributes:
+        model_path: HuggingFace model path or local path
+        device: Device to run inference on
+        height: Image height
+        width: Image width
+        max_steps: Number of diffusion steps
+        step_query: Starting step for attention control
+        layer_query: Starting layer for attention control
+        percentile: Percentile threshold for attention control
+        guidance_scale_ladg: Guidance scale for LADG
+        prediction_type: Type of prediction ('v_prediction' or 'epsilon')
+        beta_start: DDIM scheduler beta at the first timestep
+        beta_end: DDIM scheduler beta at the last timestep
+        beta_schedule: DDIM scheduler beta schedule name
+    """
+
+    model_path: str = "stabilityai/stable-diffusion-2-1"
+    device: str = "cuda"
+    height: int = 768  # Match PANDORACode config
+    width: int = 768  # Match PANDORACode config
+    max_steps: int = 50
+    step_query: int = 45  # Match PANDORACode config - critical for quality
+    layer_query: int = 17  # Match PANDORACode config - critical for quality
+    percentile: float = 95.0  # Match PANDORACode config
+    guidance_scale_ladg: float = 1.3  # Match PANDORACode config - critical for quality
+    prediction_type: Literal['v_prediction', 'epsilon'] = 'v_prediction'
+    beta_start: float = 0.00085
+    beta_end: float = 0.012
+    beta_schedule: str = "scaled_linear"
+
diff --git a/src/pandora_removal/inference.py b/src/pandora_removal/inference.py
new file mode 100644
index 0000000000000000000000000000000000000000..f5f672cd9e5d55a2134623bcf4544fcd96019498
--- /dev/null
+++ b/src/pandora_removal/inference.py
@@ -0,0 +1,203 @@
+"""Main inference module for PANDORA object removal."""
+
+from __future__ import annotations  # PEP 604 "X | Y" annotations on Python 3.8/3.9
+
+import torch
+from PIL import Image
+from diffusers import DDIMScheduler
+from typing import Optional
+
+from .config import PandoraConfig
+from .utils import extract_object_mask, get_border_from_mask
+
+
+class PandoraRemoval:
+    """PANDORA Object Removal Pipeline.
+
+    This class handles the complete pipeline for removing objects from images
+    using diffusion models with attention control.
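+
+    Example (paths are placeholders):
+        >>> model = PandoraRemoval()
+        >>> model.load_model()
+        >>> result = model.remove_object("photo.jpg", "mask.png")
+        >>> result.save("removed.png")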
+
+    Attributes:
+        config: Configuration object with model parameters
+        pipeline: The diffusion pipeline
+        device: Device for computation
+    """
+
+    def __init__(
+        self,
+        config: Optional[PandoraConfig] = None,
+        device: Optional[str] = None
+    ):
+        """Initialize the PANDORA removal pipeline.
+
+        Args:
+            config: Configuration object. If None, uses the default config.
+            device: Device to run on. Overrides config.device if provided.
+        """
+        self.config = config or PandoraConfig()
+        if device is not None:
+            self.config.device = device
+        self.device = torch.device(self.config.device)
+        self.pipeline = None
+
+    def load_model(self) -> None:
+        """Load the diffusion model and scheduler."""
+        from PANDORACode.diffuser_utils import OIICtrlPipeline
+
+        scheduler = DDIMScheduler(
+            beta_start=self.config.beta_start,
+            beta_end=self.config.beta_end,
+            beta_schedule=self.config.beta_schedule,
+            clip_sample=False,
+            set_alpha_to_one=False,
+            prediction_type=self.config.prediction_type,
+            steps_offset=1,
+        )
+
+        self.pipeline = OIICtrlPipeline.from_pretrained(
+            self.config.model_path,
+            torch_dtype=torch.float32
+        ).to(self.device)
+        self.pipeline.scheduler = scheduler
+
+    def remove_object(
+        self,
+        image: Image.Image | str,
+        mask: Image.Image | str | torch.Tensor,
+        border_size: int = 17,
+        guidance_scale: Optional[float] = None,
+        num_steps: Optional[int] = None
+    ) -> Image.Image:
+        """Remove an object from an image using the provided mask.
+
+        Args:
+            image: Input PIL Image or path to image file
+            mask: Binary mask as PIL Image, path, or torch.Tensor
+            border_size: Size of border to dilate around mask
+            guidance_scale: Guidance scale override
+            num_steps: Number of diffusion steps override
+
+        Returns:
+            PIL Image with the object removed
+        """
+        from PANDORACode.OIIctrl import OIISelfAttentionControlMask
+        # "regiter" (sic) is the spelling used by the upstream PANDORACode API
+        from PANDORACode.OIIctrl_utils import regiter_attention_editor_diffusers
+
+        if self.pipeline is None:
+            raise RuntimeError("Model not loaded. Call load_model() first.")
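+
+        # Flow: resize the inputs -> DDIM-invert the image into latents ->
+        # register mask-aware self-attention control -> regenerate with LADG
+        # guidance so the masked region is synthesized as background.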
+
+        # Load and prepare the image
+        if isinstance(image, str):
+            image = Image.open(image).convert("RGB")
+        source_image = image.resize(
+            (self.config.width, self.config.height),
+            resample=Image.BICUBIC
+        )
+
+        # Load and prepare the mask
+        if isinstance(mask, str):
+            from torchvision.io import read_image
+            mask = read_image(mask).to(self.device)
+            object_mask = extract_object_mask(mask)
+        elif isinstance(mask, Image.Image):
+            from torchvision import transforms
+            mask_tensor = transforms.ToTensor()(mask).to(self.device)
+            object_mask = extract_object_mask(mask_tensor)
+        else:
+            object_mask = extract_object_mask(mask)
+
+        # Apply border dilation
+        object_mask = get_border_from_mask(object_mask, border_size=border_size)
+        background_mask = 1.0 - object_mask
+
+        # Use config defaults if not overridden. Compare against None rather
+        # than using `or`, so an explicit 0.0 guidance scale is respected.
+        if guidance_scale is None:
+            guidance_scale = self.config.guidance_scale_ladg
+        if num_steps is None:
+            num_steps = self.config.max_steps
+
+        # PANDORACode expects masks in 2D format (H, W) - it adds the batch and
+        # channel dimensions internally. Squeeze from (1, 1, H, W) to (H, W).
+        object_mask_2d = object_mask.squeeze(0).squeeze(0)
+        background_mask_2d = background_mask.squeeze(0).squeeze(0)
+
+        # Invert the image into latent space
+        intermediates, start_code = self.pipeline.invert(
+            prompt="",
+            image=source_image,
+            guidance_scale=0.0,
+            num_inference_steps=num_steps,
+        )
+
+        # Apply object attention control
+        editor_with_mask = OIISelfAttentionControlMask(
+            start_step=self.config.step_query,
+            start_layer=self.config.layer_query,
+            mask=object_mask_2d,
+            dilated_mask=background_mask_2d,
+            total_steps=num_steps,
+            percentile=self.config.percentile
+        )
+        regiter_attention_editor_diffusers(self.pipeline, editor_with_mask)
+
+        # Generate the image with the object removed
+        generated_images = self.pipeline(
+            prompt=["", ""],
+            latents=torch.cat([start_code.clone(), start_code.clone()]),
+            latents_intermediate=intermediates,
+            num_inference_steps=num_steps,
+            guidance_scale_LADG=guidance_scale,
+            local_mask=object_mask_2d
+        )
+
+        return generated_images[1]
+
+    def batch_process(
+        self,
+        images_dir: str,
+        masks_dir: str,
+        output_dir: str,
+        border_size: int = 17,
+        image_extensions: tuple[str, ...] = ('.jpg', '.jpeg', '.png')
+    ) -> None:
+        """Process a batch of images with their corresponding masks.
+
+        Masks are expected to share the image's base name, with a .png
+        extension (e.g. Images/001.jpg pairs with Masks/001.png).
+
+        Args:
+            images_dir: Directory containing input images
+            masks_dir: Directory containing mask images
+            output_dir: Directory to save output images
+            border_size: Size of border for mask dilation
+            image_extensions: Tuple of valid image extensions
+        """
+        import os
+
+        if not os.path.exists(images_dir):
+            raise FileNotFoundError(f"Images directory not found: {images_dir}")
+
+        os.makedirs(output_dir, exist_ok=True)
+
+        image_filenames = sorted(
+            f for f in os.listdir(images_dir)
+            if f.lower().endswith(image_extensions)
+        )
+
+        for filename in image_filenames:
+            base_name, _ = os.path.splitext(filename)
+            image_path = os.path.join(images_dir, filename)
+            mask_path = os.path.join(masks_dir, f"{base_name}.png")
+
+            if not os.path.exists(mask_path):
+                print(f"⚠️ Mask not found for {filename}. Skipping.")
Skipping.") + continue + + try: + result_image = self.remove_object( + image=image_path, + mask=mask_path, + border_size=border_size + ) + + output_path = os.path.join(output_dir, f"{base_name}.png") + result_image.save(output_path) + print(f"✓ Processed {filename} → {output_path}") + + except Exception as e: + print(f"✗ Error processing {filename}: {str(e)}") + continue + + diff --git a/src/pandora_removal/utils.py b/src/pandora_removal/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..334fa00084cfb23fe9e26394b75fb497f9baf399 --- /dev/null +++ b/src/pandora_removal/utils.py @@ -0,0 +1,96 @@ +"""Utility functions for PANDORA object removal.""" + +import torch +import torch.nn.functional as F +from PIL import Image + + +def extract_object_mask(mask: torch.Tensor) -> torch.Tensor: + """Extract and normalize object mask from input tensor. + + Args: + mask: Input mask tensor of shape (C, H, W) or (H, W) + + Returns: + Normalized binary mask tensor of shape (1, 1, H, W) + """ + if mask.dim() == 2: + mask = mask.unsqueeze(0) + + if mask.dim() == 3: + # If multi-channel, take first channel or average + if mask.shape[0] > 1: + mask = mask.mean(dim=0, keepdim=True) + mask = mask.unsqueeze(0) # Add batch dimension + + # Normalize to [0, 1] + mask = mask.float() + if mask.max() > 1.0: + mask = mask / 255.0 + + # Binarize + mask = (mask > 0.5).float() + + return mask + + +def get_border_from_mask( + mask: torch.Tensor, + border_size: int = 17 +) -> torch.Tensor: + """Dilate mask to include border region. + + Args: + mask: Binary mask tensor of shape (1, 1, H, W) + border_size: Size of border to add around mask + + Returns: + Dilated mask tensor + """ + if border_size <= 0: + return mask + + # Create dilation kernel + kernel_size = 2 * border_size + 1 + kernel = torch.ones( + 1, 1, kernel_size, kernel_size, + device=mask.device, + dtype=mask.dtype + ) + + # Apply dilation using max pooling + padding = border_size + dilated = F.conv2d( + mask, + kernel, + padding=padding + ) + + # Binarize result + dilated = (dilated > 0).float() + + return dilated + + +def normalized_image(image: torch.Tensor | Image.Image) -> torch.Tensor: + """Normalize image to [-1, 1] range for diffusion models. + + Args: + image: Input image as tensor or PIL Image + + Returns: + Normalized tensor of shape (1, 3, H, W) + """ + if isinstance(image, Image.Image): + from torchvision import transforms + image = transforms.ToTensor()(image) + + if image.dim() == 3: + image = image.unsqueeze(0) + + # Normalize from [0, 1] to [-1, 1] + image = image * 2.0 - 1.0 + + return image + +